octeontx2-af: Check whether ipolicers exists
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7
8 #ifdef CONFIG_DEBUG_FS
9
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21
22 #define DEBUGFS_DIR_NAME "octeontx2"
23
24 enum {
25         CGX_STAT0,
26         CGX_STAT1,
27         CGX_STAT2,
28         CGX_STAT3,
29         CGX_STAT4,
30         CGX_STAT5,
31         CGX_STAT6,
32         CGX_STAT7,
33         CGX_STAT8,
34         CGX_STAT9,
35         CGX_STAT10,
36         CGX_STAT11,
37         CGX_STAT12,
38         CGX_STAT13,
39         CGX_STAT14,
40         CGX_STAT15,
41         CGX_STAT16,
42         CGX_STAT17,
43         CGX_STAT18,
44 };
45
46 /* NIX TX stats */
47 enum nix_stat_lf_tx {
48         TX_UCAST        = 0x0,
49         TX_BCAST        = 0x1,
50         TX_MCAST        = 0x2,
51         TX_DROP         = 0x3,
52         TX_OCTS         = 0x4,
53         TX_STATS_ENUM_LAST,
54 };
55
56 /* NIX RX stats */
57 enum nix_stat_lf_rx {
58         RX_OCTS         = 0x0,
59         RX_UCAST        = 0x1,
60         RX_BCAST        = 0x2,
61         RX_MCAST        = 0x3,
62         RX_DROP         = 0x4,
63         RX_DROP_OCTS    = 0x5,
64         RX_FCS          = 0x6,
65         RX_ERR          = 0x7,
66         RX_DRP_BCAST    = 0x8,
67         RX_DRP_MCAST    = 0x9,
68         RX_DRP_L3BCAST  = 0xa,
69         RX_DRP_L3MCAST  = 0xb,
70         RX_STATS_ENUM_LAST,
71 };
72
73 static char *cgx_rx_stats_fields[] = {
74         [CGX_STAT0]     = "Received packets",
75         [CGX_STAT1]     = "Octets of received packets",
76         [CGX_STAT2]     = "Received PAUSE packets",
77         [CGX_STAT3]     = "Received PAUSE and control packets",
78         [CGX_STAT4]     = "Filtered DMAC0 (NIX-bound) packets",
79         [CGX_STAT5]     = "Filtered DMAC0 (NIX-bound) octets",
80         [CGX_STAT6]     = "Packets dropped due to RX FIFO full",
81         [CGX_STAT7]     = "Octets dropped due to RX FIFO full",
82         [CGX_STAT8]     = "Error packets",
83         [CGX_STAT9]     = "Filtered DMAC1 (NCSI-bound) packets",
84         [CGX_STAT10]    = "Filtered DMAC1 (NCSI-bound) octets",
85         [CGX_STAT11]    = "NCSI-bound packets dropped",
86         [CGX_STAT12]    = "NCSI-bound octets dropped",
87 };
88
89 static char *cgx_tx_stats_fields[] = {
90         [CGX_STAT0]     = "Packets dropped due to excessive collisions",
91         [CGX_STAT1]     = "Packets dropped due to excessive deferral",
92         [CGX_STAT2]     = "Multiple collisions before successful transmission",
93         [CGX_STAT3]     = "Single collisions before successful transmission",
94         [CGX_STAT4]     = "Total octets sent on the interface",
95         [CGX_STAT5]     = "Total frames sent on the interface",
96         [CGX_STAT6]     = "Packets sent with an octet count < 64",
97         [CGX_STAT7]     = "Packets sent with an octet count == 64",
98         [CGX_STAT8]     = "Packets sent with an octet count of 65–127",
99         [CGX_STAT9]     = "Packets sent with an octet count of 128-255",
100         [CGX_STAT10]    = "Packets sent with an octet count of 256-511",
101         [CGX_STAT11]    = "Packets sent with an octet count of 512-1023",
102         [CGX_STAT12]    = "Packets sent with an octet count of 1024-1518",
103         [CGX_STAT13]    = "Packets sent with an octet count of > 1518",
104         [CGX_STAT14]    = "Packets sent to a broadcast DMAC",
105         [CGX_STAT15]    = "Packets sent to the multicast DMAC",
106         [CGX_STAT16]    = "Packets truncated due to transmit underflow",
107         [CGX_STAT17]    = "Control/PAUSE packets sent",
108 };
109
110 static char *rpm_rx_stats_fields[] = {
111         "Octets of received packets",
112         "Octets of received packets without error",
113         "Received packets with alignment errors",
114         "Control/PAUSE packets received",
115         "Packets received with Frame too long Errors",
116         "Packets received with in-range length Errors",
117         "Received packets",
118         "Packets received with FrameCheckSequenceErrors",
119         "Packets received with VLAN header",
120         "Error packets",
121         "Packets received with unicast DMAC",
122         "Packets received with multicast DMAC",
123         "Packets received with broadcast DMAC",
124         "Dropped packets",
125         "Total frames received on interface",
126         "Packets received with an octet count < 64",
127         "Packets received with an octet count == 64",
128         "Packets received with an octet count of 65-127",
129         "Packets received with an octet count of 128-255",
130         "Packets received with an octet count of 256-511",
131         "Packets received with an octet count of 512-1023",
132         "Packets received with an octet count of 1024-1518",
133         "Packets received with an octet count of > 1518",
134         "Oversized Packets",
135         "Jabber Packets",
136         "Fragmented Packets",
137         "CBFC(class based flow control) pause frames received for class 0",
138         "CBFC pause frames received for class 1",
139         "CBFC pause frames received for class 2",
140         "CBFC pause frames received for class 3",
141         "CBFC pause frames received for class 4",
142         "CBFC pause frames received for class 5",
143         "CBFC pause frames received for class 6",
144         "CBFC pause frames received for class 7",
145         "CBFC pause frames received for class 8",
146         "CBFC pause frames received for class 9",
147         "CBFC pause frames received for class 10",
148         "CBFC pause frames received for class 11",
149         "CBFC pause frames received for class 12",
150         "CBFC pause frames received for class 13",
151         "CBFC pause frames received for class 14",
152         "CBFC pause frames received for class 15",
153         "MAC control packets received",
154 };
155
156 static char *rpm_tx_stats_fields[] = {
157         "Total octets sent on the interface",
158         "Total octets transmitted OK",
159         "Control/Pause frames sent",
160         "Total frames transmitted OK",
161         "Total frames sent with VLAN header",
162         "Error Packets",
163         "Packets sent to unicast DMAC",
164         "Packets sent to the multicast DMAC",
165         "Packets sent to a broadcast DMAC",
166         "Packets sent with an octet count == 64",
167         "Packets sent with an octet count of 65-127",
168         "Packets sent with an octet count of 128-255",
169         "Packets sent with an octet count of 256-511",
170         "Packets sent with an octet count of 512-1023",
171         "Packets sent with an octet count of 1024-1518",
172         "Packets sent with an octet count of > 1518",
173         "CBFC(class based flow control) pause frames transmitted for class 0",
174         "CBFC pause frames transmitted for class 1",
175         "CBFC pause frames transmitted for class 2",
176         "CBFC pause frames transmitted for class 3",
177         "CBFC pause frames transmitted for class 4",
178         "CBFC pause frames transmitted for class 5",
179         "CBFC pause frames transmitted for class 6",
180         "CBFC pause frames transmitted for class 7",
181         "CBFC pause frames transmitted for class 8",
182         "CBFC pause frames transmitted for class 9",
183         "CBFC pause frames transmitted for class 10",
184         "CBFC pause frames transmitted for class 11",
185         "CBFC pause frames transmitted for class 12",
186         "CBFC pause frames transmitted for class 13",
187         "CBFC pause frames transmitted for class 14",
188         "CBFC pause frames transmitted for class 15",
189         "MAC control packets sent",
190         "Total frames sent on the interface"
191 };
192
193 enum cpt_eng_type {
194         CPT_AE_TYPE = 1,
195         CPT_SE_TYPE = 2,
196         CPT_IE_TYPE = 3,
197 };
198
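/* The number of NDC banks is reported in the low byte of NDC_AF_CONST */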
199 #define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
200                                                 blk_addr, NDC_AF_CONST) & 0xFF)
201
202 #define rvu_dbg_NULL NULL
203 #define rvu_dbg_open_NULL NULL
204
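/* Helpers to generate debugfs file_operations. RVU_DEBUG_SEQ_FOPS() builds
 * a seq_file based entry (read via single_open(), optional raw write),
 * while RVU_DEBUG_FOPS() builds a plain read/write entry. The rvu_dbg_NULL
 * and rvu_dbg_open_NULL defines let callers pass "NULL" for an unused op.
 */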
205 #define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)     \
206 static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
207 { \
208         return single_open(file, rvu_dbg_##read_op, inode->i_private); \
209 } \
210 static const struct file_operations rvu_dbg_##name##_fops = { \
211         .owner          = THIS_MODULE, \
212         .open           = rvu_dbg_open_##name, \
213         .read           = seq_read, \
214         .write          = rvu_dbg_##write_op, \
215         .llseek         = seq_lseek, \
216         .release        = single_release, \
217 }
218
219 #define RVU_DEBUG_FOPS(name, read_op, write_op) \
220 static const struct file_operations rvu_dbg_##name##_fops = { \
221         .owner = THIS_MODULE, \
222         .open = simple_open, \
223         .read = rvu_dbg_##read_op, \
224         .write = rvu_dbg_##write_op \
225 }
226
227 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
228
229 /* Dumps current provisioning status of all RVU block LFs */
230 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
231                                           char __user *buffer,
232                                           size_t count, loff_t *ppos)
233 {
234         int index, off = 0, flag = 0, go_back = 0, len = 0;
235         struct rvu *rvu = filp->private_data;
236         int lf, pf, vf, pcifunc;
237         struct rvu_block block;
238         int bytes_not_copied;
239         int lf_str_size = 12;
240         int buf_size = 2048;
241         char *lfs;
242         char *buf;
243
244         /* don't allow partial reads */
245         if (*ppos != 0)
246                 return 0;
247
248         buf = kzalloc(buf_size, GFP_KERNEL);
249         if (!buf)
250                 return -ENOMEM;
251
252         lfs = kzalloc(lf_str_size, GFP_KERNEL);
253         if (!lfs) {
254                 kfree(buf);
255                 return -ENOMEM;
256         }
257         off +=  scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
258                           "pcifunc");
259         for (index = 0; index < BLK_COUNT; index++)
260                 if (strlen(rvu->hw->block[index].name)) {
261                         off += scnprintf(&buf[off], buf_size - 1 - off,
262                                          "%-*s", lf_str_size,
263                                          rvu->hw->block[index].name);
264                 }
265         off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
266         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
267                 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
268                         pcifunc = pf << 10 | vf;
269                         if (!pcifunc)
270                                 continue;
271
272                         if (vf) {
273                                 sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
274                                 go_back = scnprintf(&buf[off],
275                                                     buf_size - 1 - off,
276                                                     "%-*s", lf_str_size, lfs);
277                         } else {
278                                 sprintf(lfs, "PF%d", pf);
279                                 go_back = scnprintf(&buf[off],
280                                                     buf_size - 1 - off,
281                                                     "%-*s", lf_str_size, lfs);
282                         }
283
284                         off += go_back;
285                         for (index = 0; index < BLKTYPE_MAX; index++) {
286                                 block = rvu->hw->block[index];
287                                 if (!strlen(block.name))
288                                         continue;
289                                 len = 0;
290                                 lfs[len] = '\0';
291                                 for (lf = 0; lf < block.lf.max; lf++) {
292                                         if (block.fn_map[lf] != pcifunc)
293                                                 continue;
294                                         flag = 1;
295                                         len += sprintf(&lfs[len], "%d,", lf);
296                                 }
297
298                                 if (flag)
299                                         len--;
300                                 lfs[len] = '\0';
301                                 off += scnprintf(&buf[off], buf_size - 1 - off,
302                                                  "%-*s", lf_str_size, lfs);
303                                 if (!strlen(lfs))
304                                         go_back += lf_str_size;
305                         }
306                         if (!flag)
307                                 off -= go_back;
308                         else
309                                 flag = 0;
310                         off--;
311                         off +=  scnprintf(&buf[off], buf_size - 1 - off, "\n");
312                 }
313         }
314
315         bytes_not_copied = copy_to_user(buffer, buf, off);
316         kfree(lfs);
317         kfree(buf);
318
319         if (bytes_not_copied)
320                 return -EFAULT;
321
322         *ppos = off;
323         return off;
324 }
325
326 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
327
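/* Dumps the PF to CGX/RPM LMAC mapping along with the NIX block each
 * CGX-mapped PF is attached to.
 */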
328 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
329 {
330         struct rvu *rvu = filp->private;
331         struct pci_dev *pdev = NULL;
332         struct mac_ops *mac_ops;
333         char cgx[10], lmac[10];
334         struct rvu_pfvf *pfvf;
335         int pf, domain, blkid;
336         u8 cgx_id, lmac_id;
337         u16 pcifunc;
338
339         domain = 2;
340         mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
341         /* There can be no CGX devices at all */
342         if (!mac_ops)
343                 return 0;
344         seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
345                    mac_ops->name);
346         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
347                 if (!is_pf_cgxmapped(rvu, pf))
348                         continue;
349
350                 pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
351                 if (!pdev)
352                         continue;
353
354                 cgx[0] = 0;
355                 lmac[0] = 0;
356                 pcifunc = pf << 10;
357                 pfvf = rvu_get_pfvf(rvu, pcifunc);
358
359                 if (pfvf->nix_blkaddr == BLKADDR_NIX0)
360                         blkid = 0;
361                 else
362                         blkid = 1;
363
364                 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
365                                     &lmac_id);
366                 sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
367                 sprintf(lmac, "LMAC%d", lmac_id);
368                 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
369                            dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
370         }
371         return 0;
372 }
373
374 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
375
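/* Checks that the LF index is within the block's range and is attached to
 * some PF/VF; on success returns the owning PF_FUNC via 'pcifunc'.
 */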
376 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
377                                 u16 *pcifunc)
378 {
379         struct rvu_block *block;
380         struct rvu_hwinfo *hw;
381
382         hw = rvu->hw;
383         block = &hw->block[blkaddr];
384
385         if (lf < 0 || lf >= block->lf.max) {
386                 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
387                          block->lf.max - 1);
388                 return false;
389         }
390
391         *pcifunc = block->fn_map[lf];
392         if (!*pcifunc) {
393                 dev_warn(rvu->dev,
394                          "This LF is not attached to any RVU PFFUNC\n");
395                 return false;
396         }
397         return true;
398 }
399
400 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
401 {
402         char *buf;
403
404         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
405         if (!buf)
406                 return;
407
408         if (!pfvf->aura_ctx) {
409                 seq_puts(m, "Aura context is not initialized\n");
410         } else {
411                 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
412                                         pfvf->aura_ctx->qsize);
413                 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
414                 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
415         }
416
417         if (!pfvf->pool_ctx) {
418                 seq_puts(m, "Pool context is not initialized\n");
419         } else {
420                 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
421                                         pfvf->pool_ctx->qsize);
422                 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
423                 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
424         }
425         kfree(buf);
426 }
427
428 /* The 'qsize' entry dumps current Aura/Pool context Qsize
429  * and each context's current enable/disable status in a bitmap.
430  */
431 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
432                                  int blktype)
433 {
434         void (*print_qsize)(struct seq_file *filp,
435                             struct rvu_pfvf *pfvf) = NULL;
436         struct dentry *current_dir;
437         struct rvu_pfvf *pfvf;
438         struct rvu *rvu;
439         int qsize_id;
440         u16 pcifunc;
441         int blkaddr;
442
443         rvu = filp->private;
444         switch (blktype) {
445         case BLKTYPE_NPA:
446                 qsize_id = rvu->rvu_dbg.npa_qsize_id;
447                 print_qsize = print_npa_qsize;
448                 break;
449
450         case BLKTYPE_NIX:
451                 qsize_id = rvu->rvu_dbg.nix_qsize_id;
452                 print_qsize = print_nix_qsize;
453                 break;
454
455         default:
456                 return -EINVAL;
457         }
458
459         if (blktype == BLKTYPE_NPA) {
460                 blkaddr = BLKADDR_NPA;
461         } else {
462                 current_dir = filp->file->f_path.dentry->d_parent;
463                 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
464                                    BLKADDR_NIX1 : BLKADDR_NIX0);
465         }
466
467         if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
468                 return -EINVAL;
469
470         pfvf = rvu_get_pfvf(rvu, pcifunc);
471         print_qsize(filp, pfvf);
472
473         return 0;
474 }
475
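/* 'qsize' write handler: parses an LF index from the user string and saves
 * it as the LF whose queue sizes the next read of this file will dump.
 */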
476 static ssize_t rvu_dbg_qsize_write(struct file *filp,
477                                    const char __user *buffer, size_t count,
478                                    loff_t *ppos, int blktype)
479 {
480         char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
481         struct seq_file *seqfile = filp->private_data;
482         char *cmd_buf, *cmd_buf_tmp, *subtoken;
483         struct rvu *rvu = seqfile->private;
484         struct dentry *current_dir;
485         int blkaddr;
486         u16 pcifunc;
487         int ret, lf;
488
489         cmd_buf = memdup_user(buffer, count + 1);
490         if (IS_ERR(cmd_buf))
491                 return -ENOMEM;
492
493         cmd_buf[count] = '\0';
494
495         cmd_buf_tmp = strchr(cmd_buf, '\n');
496         if (cmd_buf_tmp) {
497                 *cmd_buf_tmp = '\0';
498                 count = cmd_buf_tmp - cmd_buf + 1;
499         }
500
501         cmd_buf_tmp = cmd_buf;
502         subtoken = strsep(&cmd_buf, " ");
503         ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
504         if (cmd_buf)
505                 ret = -EINVAL;
506
507         if (!strncmp(subtoken, "help", 4) || ret < 0) {
508                 dev_info(rvu->dev, "Use echo <%s-lf> > qsize\n", blk_string);
509                 goto qsize_write_done;
510         }
511
512         if (blktype == BLKTYPE_NPA) {
513                 blkaddr = BLKADDR_NPA;
514         } else {
515                 current_dir = filp->f_path.dentry->d_parent;
516                 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
517                                    BLKADDR_NIX1 : BLKADDR_NIX0);
518         }
519
520         if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
521                 ret = -EINVAL;
522                 goto qsize_write_done;
523         }
524         if (blktype  == BLKTYPE_NPA)
525                 rvu->rvu_dbg.npa_qsize_id = lf;
526         else
527                 rvu->rvu_dbg.nix_qsize_id = lf;
528
529 qsize_write_done:
530         kfree(cmd_buf_tmp);
531         return ret ? ret : count;
532 }
533
534 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
535                                        const char __user *buffer,
536                                        size_t count, loff_t *ppos)
537 {
538         return rvu_dbg_qsize_write(filp, buffer, count, ppos,
539                                             BLKTYPE_NPA);
540 }
541
542 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
543 {
544         return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
545 }
546
547 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
548
549 /* Dumps given NPA Aura's context */
550 static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
551 {
552         struct npa_aura_s *aura = &rsp->aura;
553         struct rvu *rvu = m->private;
554
555         seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
556
557         seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
558                    aura->ena, aura->pool_caching);
559         seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
560                    aura->pool_way_mask, aura->avg_con);
561         seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
562                    aura->pool_drop_ena, aura->aura_drop_ena);
563         seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
564                    aura->bp_ena, aura->aura_drop);
565         seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
566                    aura->shift, aura->avg_level);
567
568         seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
569                    (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
570
571         seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
572                    (u64)aura->limit, aura->bp, aura->fc_ena);
573
574         if (!is_rvu_otx2(rvu))
575                 seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
576         seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
577                    aura->fc_up_crossing, aura->fc_stype);
578         seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
579
580         seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
581
582         seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
583                    aura->pool_drop, aura->update_time);
584         seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
585                    aura->err_int, aura->err_int_ena);
586         seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
587                    aura->thresh_int, aura->thresh_int_ena);
588         seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
589                    aura->thresh_up, aura->thresh_qint_idx);
590         seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
591
592         seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
593         if (!is_rvu_otx2(rvu))
594                 seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
595 }
596
597 /* Dumps given NPA Pool's context */
598 static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
599 {
600         struct npa_pool_s *pool = &rsp->pool;
601         struct rvu *rvu = m->private;
602
603         seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
604
605         seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
606                    pool->ena, pool->nat_align);
607         seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
608                    pool->stack_caching, pool->stack_way_mask);
609         seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
610                    pool->buf_offset, pool->buf_size);
611
612         seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
613                    pool->stack_max_pages, pool->stack_pages);
614
615         seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
616
617         seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
618                    pool->stack_offset, pool->shift, pool->avg_level);
619         seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
620                    pool->avg_con, pool->fc_ena, pool->fc_stype);
621         seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
622                    pool->fc_hyst_bits, pool->fc_up_crossing);
623         if (!is_rvu_otx2(rvu))
624                 seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
625         seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
626
627         seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
628
629         seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
630
631         seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
632
633         seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
634                    pool->err_int, pool->err_int_ena);
635         seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
636         seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
637                    pool->thresh_int_ena, pool->thresh_up);
638         seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
639                    pool->thresh_qint_idx, pool->err_qint_idx);
640         if (!is_rvu_otx2(rvu))
641                 seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
642 }
643
644 /* Reads aura/pool's ctx from admin queue */
645 static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
646 {
647         void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
648         struct npa_aq_enq_req aq_req;
649         struct npa_aq_enq_rsp rsp;
650         struct rvu_pfvf *pfvf;
651         int aura, rc, max_id;
652         int npalf, id, all;
653         struct rvu *rvu;
654         u16 pcifunc;
655
656         rvu = m->private;
657
658         switch (ctype) {
659         case NPA_AQ_CTYPE_AURA:
660                 npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
661                 id = rvu->rvu_dbg.npa_aura_ctx.id;
662                 all = rvu->rvu_dbg.npa_aura_ctx.all;
663                 break;
664
665         case NPA_AQ_CTYPE_POOL:
666                 npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
667                 id = rvu->rvu_dbg.npa_pool_ctx.id;
668                 all = rvu->rvu_dbg.npa_pool_ctx.all;
669                 break;
670         default:
671                 return -EINVAL;
672         }
673
674         if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
675                 return -EINVAL;
676
677         pfvf = rvu_get_pfvf(rvu, pcifunc);
678         if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
679                 seq_puts(m, "Aura context is not initialized\n");
680                 return -EINVAL;
681         } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
682                 seq_puts(m, "Pool context is not initialized\n");
683                 return -EINVAL;
684         }
685
686         memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
687         aq_req.hdr.pcifunc = pcifunc;
688         aq_req.ctype = ctype;
689         aq_req.op = NPA_AQ_INSTOP_READ;
690         if (ctype == NPA_AQ_CTYPE_AURA) {
691                 max_id = pfvf->aura_ctx->qsize;
692                 print_npa_ctx = print_npa_aura_ctx;
693         } else {
694                 max_id = pfvf->pool_ctx->qsize;
695                 print_npa_ctx = print_npa_pool_ctx;
696         }
697
698         if (id < 0 || id >= max_id) {
699                 seq_printf(m, "Invalid %s, valid range is 0-%d\n",
700                            (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
701                         max_id - 1);
702                 return -EINVAL;
703         }
704
705         if (all)
706                 id = 0;
707         else
708                 max_id = id + 1;
709
710         for (aura = id; aura < max_id; aura++) {
711                 aq_req.aura_id = aura;
712                 seq_printf(m, "======%s : %d=======\n",
713                            (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
714                         aq_req.aura_id);
715                 rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
716                 if (rc) {
717                         seq_puts(m, "Failed to read context\n");
718                         return -EINVAL;
719                 }
720                 print_npa_ctx(m, &rsp);
721         }
722         return 0;
723 }
724
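/* Validates the NPA LF and aura/pool index and saves them as the target
 * for subsequent aura/pool context reads.
 */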
725 static int write_npa_ctx(struct rvu *rvu, bool all,
726                          int npalf, int id, int ctype)
727 {
728         struct rvu_pfvf *pfvf;
729         int max_id = 0;
730         u16 pcifunc;
731
732         if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
733                 return -EINVAL;
734
735         pfvf = rvu_get_pfvf(rvu, pcifunc);
736
737         if (ctype == NPA_AQ_CTYPE_AURA) {
738                 if (!pfvf->aura_ctx) {
739                         dev_warn(rvu->dev, "Aura context is not initialized\n");
740                         return -EINVAL;
741                 }
742                 max_id = pfvf->aura_ctx->qsize;
743         } else if (ctype == NPA_AQ_CTYPE_POOL) {
744                 if (!pfvf->pool_ctx) {
745                         dev_warn(rvu->dev, "Pool context is not initialized\n");
746                         return -EINVAL;
747                 }
748                 max_id = pfvf->pool_ctx->qsize;
749         }
750
751         if (id < 0 || id >= max_id) {
752                 dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
753                          (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
754                         max_id - 1);
755                 return -EINVAL;
756         }
757
758         switch (ctype) {
759         case NPA_AQ_CTYPE_AURA:
760                 rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
761                 rvu->rvu_dbg.npa_aura_ctx.id = id;
762                 rvu->rvu_dbg.npa_aura_ctx.all = all;
763                 break;
764
765         case NPA_AQ_CTYPE_POOL:
766                 rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
767                 rvu->rvu_dbg.npa_pool_ctx.id = id;
768                 rvu->rvu_dbg.npa_pool_ctx.all = all;
769                 break;
770         default:
771                 return -EINVAL;
772         }
773         return 0;
774 }
775
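/* Parses "<npalf> [<id>|all]" from the user buffer; returns a negative
 * errno if the input is malformed.
 */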
776 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
777                                 const char __user *buffer, int *npalf,
778                                 int *id, bool *all)
779 {
780         int bytes_not_copied;
781         char *cmd_buf_tmp;
782         char *subtoken;
783         int ret;
784
785         bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
786         if (bytes_not_copied)
787                 return -EFAULT;
788
789         cmd_buf[*count] = '\0';
790         cmd_buf_tmp = strchr(cmd_buf, '\n');
791
792         if (cmd_buf_tmp) {
793                 *cmd_buf_tmp = '\0';
794                 *count = cmd_buf_tmp - cmd_buf + 1;
795         }
796
797         subtoken = strsep(&cmd_buf, " ");
798         ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
799         if (ret < 0)
800                 return ret;
801         subtoken = strsep(&cmd_buf, " ");
802         if (subtoken && strcmp(subtoken, "all") == 0) {
803                 *all = true;
804         } else {
805                 ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
806                 if (ret < 0)
807                         return ret;
808         }
809         if (cmd_buf)
810                 return -EINVAL;
811         return ret;
812 }
813
814 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
815                                      const char __user *buffer,
816                                      size_t count, loff_t *ppos, int ctype)
817 {
818         char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
819                                         "aura" : "pool";
820         struct seq_file *seqfp = filp->private_data;
821         struct rvu *rvu = seqfp->private;
822         int npalf, id = 0, ret;
823         bool all = false;
824
825         if ((*ppos != 0) || !count)
826                 return -EINVAL;
827
828         cmd_buf = kzalloc(count + 1, GFP_KERNEL);
829         if (!cmd_buf)
830                 return -ENOMEM;
831         ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
832                                    &npalf, &id, &all);
833         if (ret < 0) {
834                 dev_info(rvu->dev,
835                          "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
836                          ctype_string, ctype_string);
837                 goto done;
838         } else {
839                 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
840         }
841 done:
842         kfree(cmd_buf);
843         return ret ? ret : count;
844 }
845
846 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
847                                           const char __user *buffer,
848                                           size_t count, loff_t *ppos)
849 {
850         return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
851                                      NPA_AQ_CTYPE_AURA);
852 }
853
854 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
855 {
856         return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
857 }
858
859 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
860
861 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
862                                           const char __user *buffer,
863                                           size_t count, loff_t *ppos)
864 {
865         return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
866                                      NPA_AQ_CTYPE_POOL);
867 }
868
869 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
870 {
871         return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
872 }
873
874 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
875
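/* Dumps per-port NDC request, latency and outstanding-request counters for
 * the given caching mode (cached/bypass) and transaction type (read/write).
 */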
876 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
877                             int ctype, int transaction)
878 {
879         u64 req, out_req, lat, cant_alloc;
880         struct nix_hw *nix_hw;
881         struct rvu *rvu;
882         int port;
883
884         if (blk_addr == BLKADDR_NDC_NPA0) {
885                 rvu = s->private;
886         } else {
887                 nix_hw = s->private;
888                 rvu = nix_hw->rvu;
889         }
890
891         for (port = 0; port < NDC_MAX_PORT; port++) {
892                 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
893                                                 (port, ctype, transaction));
894                 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
895                                                 (port, ctype, transaction));
896                 out_req = rvu_read64(rvu, blk_addr,
897                                      NDC_AF_PORTX_RTX_RWX_OSTDN_PC
898                                      (port, ctype, transaction));
899                 cant_alloc = rvu_read64(rvu, blk_addr,
900                                         NDC_AF_PORTX_RTX_CANT_ALLOC_PC
901                                         (port, transaction));
902                 seq_printf(s, "\nPort:%d\n", port);
903                 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
904                 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
905                 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", req ? lat / req : 0);
906                 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
907                 seq_printf(s, "\tCan't Alloc Requests:\t%lld\n", cant_alloc);
908         }
909 }
910
911 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
912 {
913         seq_puts(s, "\n***** CACHE mode read stats *****\n");
914         ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
915         seq_puts(s, "\n***** CACHE mode write stats *****\n");
916         ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
917         seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
918         ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
919         seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
920         ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
921         return 0;
922 }
923
924 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
925 {
926         return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
927 }
928
929 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
930
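/* Dumps per-bank hit/miss counters of the given NDC block */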
931 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
932 {
933         struct nix_hw *nix_hw;
934         struct rvu *rvu;
935         int bank, max_bank;
936
937         if (blk_addr == BLKADDR_NDC_NPA0) {
938                 rvu = s->private;
939         } else {
940                 nix_hw = s->private;
941                 rvu = nix_hw->rvu;
942         }
943
944         max_bank = NDC_MAX_BANK(rvu, blk_addr);
945         for (bank = 0; bank < max_bank; bank++) {
946                 seq_printf(s, "BANK:%d\n", bank);
947                 seq_printf(s, "\tHits:\t%lld\n",
948                            (u64)rvu_read64(rvu, blk_addr,
949                            NDC_AF_BANKX_HIT_PC(bank)));
950                 seq_printf(s, "\tMiss:\t%lld\n",
951                            (u64)rvu_read64(rvu, blk_addr,
952                             NDC_AF_BANKX_MISS_PC(bank)));
953         }
954         return 0;
955 }
956
957 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
958 {
959         struct nix_hw *nix_hw = filp->private;
960         int blkaddr = 0;
961         int ndc_idx = 0;
962
963         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
964                    BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
965         ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
966
967         return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
968 }
969
970 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
971
972 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
973 {
974         struct nix_hw *nix_hw = filp->private;
975         int blkaddr = 0;
976         int ndc_idx = 0;
977
978         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
979                    BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
980         ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
981
982         return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
983 }
984
985 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
986
987 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
988                                              void *unused)
989 {
990         return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
991 }
992
993 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
994
995 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
996                                                 void *unused)
997 {
998         struct nix_hw *nix_hw = filp->private;
999         int ndc_idx = NPA0_U;
1000         int blkaddr = 0;
1001
1002         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1003                    BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1004
1005         return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1006 }
1007
1008 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1009
1010 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1011                                                 void *unused)
1012 {
1013         struct nix_hw *nix_hw = filp->private;
1014         int ndc_idx = NPA0_U;
1015         int blkaddr = 0;
1016
1017         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1018                    BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1019
1020         return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1021 }
1022
1023 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1024
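/* Dumps given nix_sq's context on CN10K silicon */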
1025 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1026                                    struct nix_cn10k_sq_ctx_s *sq_ctx)
1027 {
1028         seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1029                    sq_ctx->ena, sq_ctx->qint_idx);
1030         seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1031                    sq_ctx->substream, sq_ctx->sdp_mcast);
1032         seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1033                    sq_ctx->cq, sq_ctx->sqe_way_mask);
1034
1035         seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1036                    sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1037         seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1038                    sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1039         seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1040                    sq_ctx->default_chan, sq_ctx->sqb_count);
1041
1042         seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1043         seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1044         seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1045                    sq_ctx->sqb_aura, sq_ctx->sq_int);
1046         seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1047                    sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1048
1049         seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1050                    sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1051         seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1052                    sq_ctx->lmt_dis, sq_ctx->mnq_dis);
1053         seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1054                    sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1055         seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1056                    sq_ctx->tail_offset, sq_ctx->smenq_offset);
1057         seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1058                    sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1059
1060         seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1061         seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1062         seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1063         seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1064                    sq_ctx->smenq_next_sqb);
1065
1066         seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1067
1068         seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1069         seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1070                    sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1071         seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1072                    sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1073         seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1074                    sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld);
1075
1076         seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1077                    (u64)sq_ctx->scm_lso_rem);
1078         seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1079         seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1080         seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1081                    (u64)sq_ctx->dropped_octs);
1082         seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1083                    (u64)sq_ctx->dropped_pkts);
1084 }
1085
1086 /* Dumps given nix_sq's context */
1087 static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1088 {
1089         struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
1090         struct nix_hw *nix_hw = m->private;
1091         struct rvu *rvu = nix_hw->rvu;
1092
1093         if (!is_rvu_otx2(rvu)) {
1094                 print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
1095                 return;
1096         }
1097         seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
1098                    sq_ctx->sqe_way_mask, sq_ctx->cq);
1099         seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1100                    sq_ctx->sdp_mcast, sq_ctx->substream);
1101         seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
1102                    sq_ctx->qint_idx, sq_ctx->ena);
1103
1104         seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
1105                    sq_ctx->sqb_count, sq_ctx->default_chan);
1106         seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
1107                    sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
1108         seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
1109                    sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
1110
1111         seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
1112                    sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
1113         seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
1114                    sq_ctx->sq_int, sq_ctx->sqb_aura);
1115         seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
1116
1117         seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1118                    sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1119         seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
1120                    sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
1121         seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
1122                    sq_ctx->smenq_offset, sq_ctx->tail_offset);
1123         seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
1124                    sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
1125         seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
1126                    sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1127         seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
1128                    sq_ctx->cq_limit, sq_ctx->max_sqe_size);
1129
1130         seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1131         seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1132         seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1133         seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1134                    sq_ctx->smenq_next_sqb);
1135
1136         seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1137
1138         seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
1139                    sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1140         seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
1141                    sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
1142         seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
1143                    sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
1144         seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
1145
1146         seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1147                    (u64)sq_ctx->scm_lso_rem);
1148         seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1149         seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1150         seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1151                    (u64)sq_ctx->dropped_octs);
1152         seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1153                    (u64)sq_ctx->dropped_pkts);
1154 }
1155
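/* Dumps given nix_rq's context on CN10K silicon */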
1156 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1157                                    struct nix_cn10k_rq_ctx_s *rq_ctx)
1158 {
1159         seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1160                    rq_ctx->ena, rq_ctx->sso_ena);
1161         seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1162                    rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1163         seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1164                    rq_ctx->cq, rq_ctx->lenerr_dis);
1165         seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1166                    rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1167         seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1168                    rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1169         seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1170                    rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1171         seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1172
1173         seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1174                    rq_ctx->spb_aura, rq_ctx->lpb_aura);
1176         seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1177                    rq_ctx->sso_grp, rq_ctx->sso_tt);
1178         seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1179                    rq_ctx->pb_caching, rq_ctx->wqe_caching);
1180         seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1181                    rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1182         seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1183                    rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1184         seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1185                    rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1186
1187         seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1188         seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1189         seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1190         seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: spb_ena \t\t\t%d\n",
1191                    rq_ctx->wqe_skip, rq_ctx->spb_ena);
1192         seq_printf(m, "W2: lpb_sizem1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1193                    rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1194         seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1195                    rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1196         seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1197                    rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1198
1199         seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1200                    rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1201         seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1202                    rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1203         seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1204                    rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1205         seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1206                    rq_ctx->spb_aura_drop, rq_ctx->spb_aura_pass);
1207
1208         seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1209                    rq_ctx->lpb_aura_drop, rq_ctx->lpb_aura_pass);
1210         seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1211                    rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1212         seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1213                    rq_ctx->rq_int, rq_ctx->rq_int_ena);
1214         seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1215
1216         seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1217                    rq_ctx->ltag, rq_ctx->good_utag);
1218         seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1219                    rq_ctx->bad_utag, rq_ctx->flow_tagw);
1220         seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1221                    rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1222         seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1223                    rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1224         seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1225
1226         seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1227         seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1228         seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1229         seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1230         seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1231 }
1232
1233 /* Dumps given nix_rq's context */
1234 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1235 {
1236         struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
1237         struct nix_hw *nix_hw = m->private;
1238         struct rvu *rvu = nix_hw->rvu;
1239
1240         if (!is_rvu_otx2(rvu)) {
1241                 print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
1242                 return;
1243         }
1244
1245         seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1246                    rq_ctx->wqe_aura, rq_ctx->substream);
1247         seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1248                    rq_ctx->cq, rq_ctx->ena_wqwd);
1249         seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1250                    rq_ctx->ipsech_ena, rq_ctx->sso_ena);
1251         seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
1252
1253         seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1254                    rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
1255         seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
1256                    rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
1257         seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1258                    rq_ctx->pb_caching, rq_ctx->sso_tt);
1259         seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1260                    rq_ctx->sso_grp, rq_ctx->lpb_aura);
1261         seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
1262
1263         seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
1264                    rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
1265         seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
1266                    rq_ctx->xqe_imm_size, rq_ctx->later_skip);
1267         seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
1268                    rq_ctx->first_skip, rq_ctx->lpb_sizem1);
1269         seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
1270                    rq_ctx->spb_ena, rq_ctx->wqe_skip);
1271         seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
1272
1273         seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
1274                    rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
1275         seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
1276                    rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1277         seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
1278                    rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
1279         seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
1280                    rq_ctx->xqe_pass, rq_ctx->xqe_drop);
1281
1282         seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
1283                    rq_ctx->qint_idx, rq_ctx->rq_int_ena);
1284         seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1285                    rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
1286         seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1287                    rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
1288         seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
1289
1290         seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
1291                    rq_ctx->flow_tagw, rq_ctx->bad_utag);
1292         seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
1293                    rq_ctx->good_utag, rq_ctx->ltag);
1294
1295         seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1296         seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1297         seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1298         seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1299         seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1300 }
1301
1302 /* Dumps given nix_cq's context */
1303 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1304 {
1305         struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1306
1307         seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1308
1309         seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1310         seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1311                    cq_ctx->avg_con, cq_ctx->cint_idx);
1312         seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1313                    cq_ctx->cq_err, cq_ctx->qint_idx);
1314         seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1315                    cq_ctx->bpid, cq_ctx->bp_ena);
1316
1317         seq_printf(m, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d\n",
1318                    cq_ctx->update_time, cq_ctx->avg_level);
1319         seq_printf(m, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n\n",
1320                    cq_ctx->head, cq_ctx->tail);
1321
1322         seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d\n",
1323                    cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1324         seq_printf(m, "W3: qsize \t\t\t%d\nW3: caching \t\t\t%d\n",
1325                    cq_ctx->qsize, cq_ctx->caching);
1326         seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1327                    cq_ctx->substream, cq_ctx->ena);
1328         seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1329                    cq_ctx->drop_ena, cq_ctx->drop);
1330         seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
1331 }
1332
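/* Reads back the queue selection cached by the matching *_ctx write handler
 * below (NIX LF, queue id or "all") and dumps each selected context through
 * a NIX AQ READ request.
 */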
1333 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
1334                                          void *unused, int ctype)
1335 {
1336         void (*print_nix_ctx)(struct seq_file *filp,
1337                               struct nix_aq_enq_rsp *rsp) = NULL;
1338         struct nix_hw *nix_hw = filp->private;
1339         struct rvu *rvu = nix_hw->rvu;
1340         struct nix_aq_enq_req aq_req;
1341         struct nix_aq_enq_rsp rsp;
1342         char *ctype_string = NULL;
1343         int qidx, rc, max_id = 0;
1344         struct rvu_pfvf *pfvf;
1345         int nixlf, id, all;
1346         u16 pcifunc;
1347
1348         switch (ctype) {
1349         case NIX_AQ_CTYPE_CQ:
1350                 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
1351                 id = rvu->rvu_dbg.nix_cq_ctx.id;
1352                 all = rvu->rvu_dbg.nix_cq_ctx.all;
1353                 break;
1354
1355         case NIX_AQ_CTYPE_SQ:
1356                 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
1357                 id = rvu->rvu_dbg.nix_sq_ctx.id;
1358                 all = rvu->rvu_dbg.nix_sq_ctx.all;
1359                 break;
1360
1361         case NIX_AQ_CTYPE_RQ:
1362                 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
1363                 id = rvu->rvu_dbg.nix_rq_ctx.id;
1364                 all = rvu->rvu_dbg.nix_rq_ctx.all;
1365                 break;
1366
1367         default:
1368                 return -EINVAL;
1369         }
1370
1371         if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1372                 return -EINVAL;
1373
1374         pfvf = rvu_get_pfvf(rvu, pcifunc);
1375         if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
1376                 seq_puts(filp, "SQ context is not initialized\n");
1377                 return -EINVAL;
1378         } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
1379                 seq_puts(filp, "RQ context is not initialized\n");
1380                 return -EINVAL;
1381         } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
1382                 seq_puts(filp, "CQ context is not initialized\n");
1383                 return -EINVAL;
1384         }
1385
1386         if (ctype == NIX_AQ_CTYPE_SQ) {
1387                 max_id = pfvf->sq_ctx->qsize;
1388                 ctype_string = "sq";
1389                 print_nix_ctx = print_nix_sq_ctx;
1390         } else if (ctype == NIX_AQ_CTYPE_RQ) {
1391                 max_id = pfvf->rq_ctx->qsize;
1392                 ctype_string = "rq";
1393                 print_nix_ctx = print_nix_rq_ctx;
1394         } else if (ctype == NIX_AQ_CTYPE_CQ) {
1395                 max_id = pfvf->cq_ctx->qsize;
1396                 ctype_string = "cq";
1397                 print_nix_ctx = print_nix_cq_ctx;
1398         }
1399
1400         memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1401         aq_req.hdr.pcifunc = pcifunc;
1402         aq_req.ctype = ctype;
1403         aq_req.op = NIX_AQ_INSTOP_READ;
1404         if (all)
1405                 id = 0;
1406         else
1407                 max_id = id + 1;
1408         for (qidx = id; qidx < max_id; qidx++) {
1409                 aq_req.qidx = qidx;
1410                 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
1411                            ctype_string, nixlf, aq_req.qidx);
1412                 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1413                 if (rc) {
1414                         seq_puts(filp, "Failed to read the context\n");
1415                         return -EINVAL;
1416                 }
1417                 print_nix_ctx(filp, &rsp);
1418         }
1419         return 0;
1420 }
1421
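/* Validates the queue id requested via debugfs against the LF's configured
 * qsize and caches the selection in rvu->rvu_dbg so the corresponding *_ctx
 * read handler can dump it later.
 */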
1422 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1423                                int id, int ctype, char *ctype_string,
1424                                struct seq_file *m)
1425 {
1426         struct nix_hw *nix_hw = m->private;
1427         struct rvu_pfvf *pfvf;
1428         int max_id = 0;
1429         u16 pcifunc;
1430
1431         if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1432                 return -EINVAL;
1433
1434         pfvf = rvu_get_pfvf(rvu, pcifunc);
1435
1436         if (ctype == NIX_AQ_CTYPE_SQ) {
1437                 if (!pfvf->sq_ctx) {
1438                         dev_warn(rvu->dev, "SQ context is not initialized\n");
1439                         return -EINVAL;
1440                 }
1441                 max_id = pfvf->sq_ctx->qsize;
1442         } else if (ctype == NIX_AQ_CTYPE_RQ) {
1443                 if (!pfvf->rq_ctx) {
1444                         dev_warn(rvu->dev, "RQ context is not initialized\n");
1445                         return -EINVAL;
1446                 }
1447                 max_id = pfvf->rq_ctx->qsize;
1448         } else if (ctype == NIX_AQ_CTYPE_CQ) {
1449                 if (!pfvf->cq_ctx) {
1450                         dev_warn(rvu->dev, "CQ context is not initialized\n");
1451                         return -EINVAL;
1452                 }
1453                 max_id = pfvf->cq_ctx->qsize;
1454         }
1455
1456         if (id < 0 || id >= max_id) {
1457                 dev_warn(rvu->dev, "Invalid %s_ctx, valid range is 0-%d\n",
1458                          ctype_string, max_id - 1);
1459                 return -EINVAL;
1460         }
1461         switch (ctype) {
1462         case NIX_AQ_CTYPE_CQ:
1463                 rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1464                 rvu->rvu_dbg.nix_cq_ctx.id = id;
1465                 rvu->rvu_dbg.nix_cq_ctx.all = all;
1466                 break;
1467
1468         case NIX_AQ_CTYPE_SQ:
1469                 rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1470                 rvu->rvu_dbg.nix_sq_ctx.id = id;
1471                 rvu->rvu_dbg.nix_sq_ctx.all = all;
1472                 break;
1473
1474         case NIX_AQ_CTYPE_RQ:
1475                 rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1476                 rvu->rvu_dbg.nix_rq_ctx.id = id;
1477                 rvu->rvu_dbg.nix_rq_ctx.all = all;
1478                 break;
1479         default:
1480                 return -EINVAL;
1481         }
1482         return 0;
1483 }
1484
1485 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1486                                            const char __user *buffer,
1487                                            size_t count, loff_t *ppos,
1488                                            int ctype)
1489 {
1490         struct seq_file *m = filp->private_data;
1491         struct nix_hw *nix_hw = m->private;
1492         struct rvu *rvu = nix_hw->rvu;
1493         char *cmd_buf, *ctype_string;
1494         int nixlf, id = 0, ret;
1495         bool all = false;
1496
1497         if ((*ppos != 0) || !count)
1498                 return -EINVAL;
1499
1500         switch (ctype) {
1501         case NIX_AQ_CTYPE_SQ:
1502                 ctype_string = "sq";
1503                 break;
1504         case NIX_AQ_CTYPE_RQ:
1505                 ctype_string = "rq";
1506                 break;
1507         case NIX_AQ_CTYPE_CQ:
1508                 ctype_string = "cq";
1509                 break;
1510         default:
1511                 return -EINVAL;
1512         }
1513
1514         cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1515
1516         if (!cmd_buf)
1517                 return -ENOMEM;
1518
1519         ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1520                                    &nixlf, &id, &all);
1521         if (ret < 0) {
1522                 dev_info(rvu->dev,
1523                          "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1524                          ctype_string, ctype_string);
1525                 goto done;
1526         } else {
1527                 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1528                                           ctype_string, m);
1529         }
1530 done:
1531         kfree(cmd_buf);
1532         return ret ? ret : count;
1533 }
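
/* Example usage (a sketch: the exact token format is whatever
 * parse_cmd_buffer_ctx() accepts, per the usage string above, and debugfs is
 * assumed to be mounted at /sys/kernel/debug with this driver's tree under
 * "octeontx2"):
 *   echo "0 all" > /sys/kernel/debug/octeontx2/nix/cq_ctx
 *   cat /sys/kernel/debug/octeontx2/nix/cq_ctx
 */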
1534
1535 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
1536                                         const char __user *buffer,
1537                                         size_t count, loff_t *ppos)
1538 {
1539         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1540                                             NIX_AQ_CTYPE_SQ);
1541 }
1542
1543 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
1544 {
1545         return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
1546 }
1547
1548 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1549
1550 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
1551                                         const char __user *buffer,
1552                                         size_t count, loff_t *ppos)
1553 {
1554         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1555                                             NIX_AQ_CTYPE_RQ);
1556 }
1557
1558 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
1559 {
1560         return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
1561 }
1562
1563 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1564
1565 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
1566                                         const char __user *buffer,
1567                                         size_t count, loff_t *ppos)
1568 {
1569         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1570                                             NIX_AQ_CTYPE_CQ);
1571 }
1572
1573 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
1574 {
1575         return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
1576 }
1577
1578 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1579
1580 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1581                                  unsigned long *bmap, char *qtype)
1582 {
1583         char *buf;
1584
1585         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1586         if (!buf)
1587                 return;
1588
1589         bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1590         seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1591         seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1592                    qtype, buf);
1593         kfree(buf);
1594 }
1595
1596 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1597 {
1598         if (!pfvf->cq_ctx)
1599                 seq_puts(filp, "cq context is not initialized\n");
1600         else
1601                 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1602                                      "cq");
1603
1604         if (!pfvf->rq_ctx)
1605                 seq_puts(filp, "rq context is not initialized\n");
1606         else
1607                 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1608                                      "rq");
1609
1610         if (!pfvf->sq_ctx)
1611                 seq_puts(filp, "sq context is not initialized\n");
1612         else
1613                 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1614                                      "sq");
1615 }
1616
1617 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
1618                                        const char __user *buffer,
1619                                        size_t count, loff_t *ppos)
1620 {
1621         return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1622                                    BLKTYPE_NIX);
1623 }
1624
1625 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
1626 {
1627         return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
1628 }
1629
1630 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1631
1632 static void print_band_prof_ctx(struct seq_file *m,
1633                                 struct nix_bandprof_s *prof)
1634 {
1635         char *str;
1636
1637         switch (prof->pc_mode) {
1638         case NIX_RX_PC_MODE_VLAN:
1639                 str = "VLAN";
1640                 break;
1641         case NIX_RX_PC_MODE_DSCP:
1642                 str = "DSCP";
1643                 break;
1644         case NIX_RX_PC_MODE_GEN:
1645                 str = "Generic";
1646                 break;
1647         case NIX_RX_PC_MODE_RSVD:
1648                 str = "Reserved";
1649                 break;
1650         }
1651         seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1652         str = (prof->icolor == 3) ? "Color blind" :
1653                 (prof->icolor == 0) ? "Green" :
1654                 (prof->icolor == 1) ? "Yellow" : "Red";
1655         seq_printf(m, "W0: icolor\t\t%s\n", str);
1656         seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1657         seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1658         seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1659         seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1660         seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1661         seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1662         seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1663         seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1664
1665         seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1666         str = (prof->lmode == 0) ? "byte" : "packet";
1667         seq_printf(m, "W1: lmode\t\t%s\n", str);
1668         seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1669         seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1670         seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1671         seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
1672         str = (prof->gc_action == 0) ? "PASS" :
1673                 (prof->gc_action == 1) ? "DROP" : "RED";
1674         seq_printf(m, "W1: gc_action\t\t%s\n", str);
1675         str = (prof->yc_action == 0) ? "PASS" :
1676                 (prof->yc_action == 1) ? "DROP" : "RED";
1677         seq_printf(m, "W1: yc_action\t\t%s\n", str);
1678         str = (prof->rc_action == 0) ? "PASS" :
1679                 (prof->rc_action == 1) ? "DROP" : "RED";
1680         seq_printf(m, "W1: rc_action\t\t%s\n", str);
1681         seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1682         seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1683         seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1684
1685         seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1686         seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1687         seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
1688         seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1689                    (u64)prof->green_pkt_pass);
1690         seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1691                    (u64)prof->yellow_pkt_pass);
1692         seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1693         seq_printf(m, "W7: green_octs_pass\t%lld\n",
1694                    (u64)prof->green_octs_pass);
1695         seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1696                    (u64)prof->yellow_octs_pass);
1697         seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1698         seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1699                    (u64)prof->green_pkt_drop);
1700         seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1701                    (u64)prof->yellow_pkt_drop);
1702         seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1703         seq_printf(m, "W13: green_octs_drop\t%lld\n",
1704                    (u64)prof->green_octs_drop);
1705         seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1706                    (u64)prof->yellow_octs_drop);
1707         seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1708         seq_puts(m, "==============================\n");
1709 }
1710
1711 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
1712 {
1713         struct nix_hw *nix_hw = m->private;
1714         struct nix_cn10k_aq_enq_req aq_req;
1715         struct nix_cn10k_aq_enq_rsp aq_rsp;
1716         struct rvu *rvu = nix_hw->rvu;
1717         struct nix_ipolicer *ipolicer;
1718         int layer, prof_idx, idx, rc;
1719         u16 pcifunc;
1720         char *str;
1721
1722         /* Ingress policers do not exist on all platforms */
1723         if (!nix_hw->ipolicer)
1724                 return 0;
1725
1726         for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1727                 if (layer == BAND_PROF_INVAL_LAYER)
1728                         continue;
1729                 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1730                         (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
1731
1732                 seq_printf(m, "\n%s bandwidth profiles\n", str);
1733                 seq_puts(m, "=======================\n");
1734
1735                 ipolicer = &nix_hw->ipolicer[layer];
1736
1737                 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
1738                         if (is_rsrc_free(&ipolicer->band_prof, idx))
1739                                 continue;
1740
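                        /* The AQ index packs the profile number in bits
                         * [13:0] and the policer layer in the bits above.
                         */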
1741                         prof_idx = (idx & 0x3FFF) | (layer << 14);
1742                         rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1743                                                  0x00, NIX_AQ_CTYPE_BANDPROF,
1744                                                  prof_idx);
1745                         if (rc) {
1746                                 dev_err(rvu->dev,
1747                                         "%s: Failed to fetch context of %s profile %d, err %d\n",
1748                                         __func__, str, idx, rc);
1749                                 return 0;
1750                         }
1751                         seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
1752                         pcifunc = ipolicer->pfvf_map[idx];
1753                         if (!(pcifunc & RVU_PFVF_FUNC_MASK))
1754                                 seq_printf(m, "Allocated to :: PF %d\n",
1755                                            rvu_get_pf(pcifunc));
1756                         else
1757                                 seq_printf(m, "Allocated to :: PF %d VF %d\n",
1758                                            rvu_get_pf(pcifunc),
1759                                            (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1760                         print_band_prof_ctx(m, &aq_rsp.prof);
1761                 }
1762         }
1763         return 0;
1764 }
1765
1766 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
1767
1768 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1769 {
1770         struct nix_hw *nix_hw = m->private;
1771         struct nix_ipolicer *ipolicer;
1772         int layer;
1773         char *str;
1774
1775         /* Ingress policers do not exist on all platforms */
1776         if (!nix_hw->ipolicer)
1777                 return 0;
1778
1779         seq_puts(m, "\nBandwidth profile resource free count\n");
1780         seq_puts(m, "=====================================\n");
1781         for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1782                 if (layer == BAND_PROF_INVAL_LAYER)
1783                         continue;
1784                 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1785                         (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1786
1787                 ipolicer = &nix_hw->ipolicer[layer];
1788                 seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
1789                            ipolicer->band_prof.max,
1790                            rvu_rsrc_free_count(&ipolicer->band_prof));
1791         }
1792         seq_puts(m, "=====================================\n");
1793
1794         return 0;
1795 }
1796
1797 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
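
/* Neither policer view has a write handler; rvu_dbg_nix_init() below exposes
 * them as "ingress_policer_ctx" and "ingress_policer_rsrc" in the per-NIX
 * debugfs directory, e.g. (assuming the usual debugfs mount point):
 *   cat /sys/kernel/debug/octeontx2/nix/ingress_policer_rsrc
 */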
1798
1799 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1800 {
1801         struct nix_hw *nix_hw;
1802
1803         if (!is_block_implemented(rvu->hw, blkaddr))
1804                 return;
1805
1806         if (blkaddr == BLKADDR_NIX0) {
1807                 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1808                 nix_hw = &rvu->hw->nix[0];
1809         } else {
1810                 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1811                                                       rvu->rvu_dbg.root);
1812                 nix_hw = &rvu->hw->nix[1];
1813         }
1814
1815         debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1816                             &rvu_dbg_nix_sq_ctx_fops);
1817         debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1818                             &rvu_dbg_nix_rq_ctx_fops);
1819         debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1820                             &rvu_dbg_nix_cq_ctx_fops);
1821         debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1822                             &rvu_dbg_nix_ndc_tx_cache_fops);
1823         debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1824                             &rvu_dbg_nix_ndc_rx_cache_fops);
1825         debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1826                             &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1827         debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1828                             &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1829         debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1830                             &rvu_dbg_nix_qsize_fops);
1831         debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1832                             &rvu_dbg_nix_band_prof_ctx_fops);
1833         debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
1834                             &rvu_dbg_nix_band_prof_rsrc_fops);
1835 }
1836
1837 static void rvu_dbg_npa_init(struct rvu *rvu)
1838 {
1839         rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1840
1841         debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1842                             &rvu_dbg_npa_qsize_fops);
1843         debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1844                             &rvu_dbg_npa_aura_ctx_fops);
1845         debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1846                             &rvu_dbg_npa_pool_ctx_fops);
1847         debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
1848                             &rvu_dbg_npa_ndc_cache_fops);
1849         debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
1850                             &rvu_dbg_npa_ndc_hits_miss_fops);
1851 }
1852
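/* GCC statement-expression helpers: each fetches one cumulative NIX RX/TX
 * stat for this CGX/RPM LMAC, prints it when the read succeeds, assigns
 * 'err' in the caller's scope and evaluates to the counter value itself.
 */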
1853 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)                          \
1854         ({                                                              \
1855                 u64 cnt;                                                \
1856                 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
1857                                              NIX_STATS_RX, &(cnt));     \
1858                 if (!err)                                               \
1859                         seq_printf(s, "%s: %llu\n", name, cnt);         \
1860                 cnt;                                                    \
1861         })
1862
1863 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)                  \
1864         ({                                                              \
1865                 u64 cnt;                                                \
1866                 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
1867                                           NIX_STATS_TX, &(cnt));        \
1868                 if (!err)                                               \
1869                         seq_printf(s, "%s: %llu\n", name, cnt);         \
1870                 cnt;                                                    \
1871         })
1872
1873 static int cgx_print_stats(struct seq_file *s, int lmac_id)
1874 {
1875         struct cgx_link_user_info linfo;
1876         struct mac_ops *mac_ops;
1877         void *cgxd = s->private;
1878         u64 ucast, mcast, bcast;
1879         int stat = 0, err = 0;
1880         u64 tx_stat, rx_stat;
1881         struct rvu *rvu;
1882
1883         rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1884                                              PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1885         if (!rvu)
1886                 return -ENODEV;
1887
1888         mac_ops = get_mac_ops(cgxd);
1889
1890         if (!mac_ops)
1891                 return 0;
1892
1893         /* Link status */
1894         seq_puts(s, "\n=======Link Status======\n\n");
1895         err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1896         if (err)
1897                 seq_puts(s, "Failed to read link status\n");
1898         seq_printf(s, "\nLink is %s %d Mbps\n\n",
1899                    linfo.link_up ? "UP" : "DOWN", linfo.speed);
1900
1901         /* Rx stats */
1902         seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
1903                    mac_ops->name);
1904         ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1905         if (err)
1906                 return err;
1907         mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1908         if (err)
1909                 return err;
1910         bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1911         if (err)
1912                 return err;
1913         seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1914         PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1915         if (err)
1916                 return err;
1917         PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1918         if (err)
1919                 return err;
1920         PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
1921         if (err)
1922                 return err;
1923
1924         /* Tx stats */
1925         seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
1926                    mac_ops->name);
1927         ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
1928         if (err)
1929                 return err;
1930         mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
1931         if (err)
1932                 return err;
1933         bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
1934         if (err)
1935                 return err;
1936         seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
1937         PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
1938         if (err)
1939                 return err;
1940         PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
1941         if (err)
1942                 return err;
1943
1944         /* Rx stats */
1945         seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
1946         while (stat < mac_ops->rx_stats_cnt) {
1947                 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
1948                 if (err)
1949                         return err;
1950                 if (is_rvu_otx2(rvu))
1951                         seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
1952                                    rx_stat);
1953                 else
1954                         seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
1955                                    rx_stat);
1956                 stat++;
1957         }
1958
1959         /* Tx stats */
1960         stat = 0;
1961         seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
1962         while (stat < mac_ops->tx_stats_cnt) {
1963                 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
1964                 if (err)
1965                         return err;
1966
1967                 if (is_rvu_otx2(rvu))
1968                         seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
1969                                    tx_stat);
1970                 else
1971                         seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
1972                                    tx_stat);
1973                 stat++;
1974         }
1975
1976         return err;
1977 }
1978
1979 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
1980 {
1981         struct dentry *current_dir;
1982         char *buf;
1983
1984         current_dir = filp->file->f_path.dentry->d_parent;
1985         buf = strrchr(current_dir->d_name.name, 'c');
1986         if (!buf)
1987                 return -EINVAL;
1988
1989         return kstrtoint(buf + 1, 10, lmac_id);
1990 }
1991
1992 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
1993 {
1994         int lmac_id, err;
1995
1996         err = rvu_dbg_derive_lmacid(filp, &lmac_id);
1997         if (!err)
1998                 return cgx_print_stats(filp, lmac_id);
1999
2000         return err;
2001 }
2002
2003 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2004
2005 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2006 {
2007         struct pci_dev *pdev = NULL;
2008         void *cgxd = s->private;
2009         char *bcast, *mcast;
2010         u16 index, domain;
2011         u8 dmac[ETH_ALEN];
2012         struct rvu *rvu;
2013         u64 cfg, mac;
2014         int pf;
2015
2016         rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2017                                              PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2018         if (!rvu)
2019                 return -ENODEV;
2020
2021         pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2022         domain = 2;
2023
2024         pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2025         if (!pdev)
2026                 return 0;
2027
2028         cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2029         bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2030         mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2031
2032         seq_puts(s,
2033                  "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2034         seq_printf(s, "%s  PF%d  %9s  %9s",
2035                    dev_name(&pdev->dev), pf, bcast, mcast);
2036         if (cfg & CGX_DMAC_CAM_ACCEPT)
2037                 seq_printf(s, "%12s\n\n", "UNICAST");
2038         else
2039                 seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2040
2041         seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2042
2043         for (index = 0 ; index < 32 ; index++) {
2044                 cfg = cgx_read_dmac_entry(cgxd, index);
2045                 /* Display enabled dmac entries associated with current lmac */
2046                 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2047                     FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2048                         mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2049                         u64_to_ether_addr(mac, dmac);
2050                         seq_printf(s, "%7d     %pM\n", index, dmac);
2051                 }
2052         }
2053
2054         return 0;
2055 }
2056
2057 static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
2058 {
2059         int err, lmac_id;
2060
2061         err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2062         if (!err)
2063                 return cgx_print_dmac_flt(filp, lmac_id);
2064
2065         return err;
2066 }
2067
2068 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2069
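/* Per-LMAC files are created as <mac>/<mac><N>/lmac<M>/{stats,mac_filter}
 * under the driver's debugfs root, where <mac> is the MAC block name from
 * mac_ops (e.g. "cgx" or "rpm"); a typical path is therefore
 * /sys/kernel/debug/octeontx2/cgx/cgx0/lmac0/stats.
 */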
2070 static void rvu_dbg_cgx_init(struct rvu *rvu)
2071 {
2072         struct mac_ops *mac_ops;
2073         unsigned long lmac_bmap;
2074         int i, lmac_id;
2075         char dname[20];
2076         void *cgx;
2077
2078         if (!cgx_get_cgxcnt_max())
2079                 return;
2080
2081         mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2082         if (!mac_ops)
2083                 return;
2084
2085         rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2086                                                    rvu->rvu_dbg.root);
2087
2088         for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2089                 cgx = rvu_cgx_pdata(i, rvu);
2090                 if (!cgx)
2091                         continue;
2092                 lmac_bmap = cgx_get_lmac_bmap(cgx);
2093                 /* cgx debugfs dir */
2094                 sprintf(dname, "%s%d", mac_ops->name, i);
2095                 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2096                                                       rvu->rvu_dbg.cgx_root);
2097
2098                 for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
2099                         /* lmac debugfs dir */
2100                         sprintf(dname, "lmac%d", lmac_id);
2101                         rvu->rvu_dbg.lmac =
2102                                 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2103
2104                         debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2105                                             cgx, &rvu_dbg_cgx_stat_fops);
2106                         debugfs_create_file("mac_filter", 0600,
2107                                             rvu->rvu_dbg.lmac, cgx,
2108                                             &rvu_dbg_cgx_dmac_flt_fops);
2109                 }
2110         }
2111 }
2112
2113 /* NPC debugfs APIs */
2114 static void rvu_print_npc_mcam_info(struct seq_file *s,
2115                                     u16 pcifunc, int blkaddr)
2116 {
2117         struct rvu *rvu = s->private;
2118         int entry_acnt, entry_ecnt;
2119         int cntr_acnt, cntr_ecnt;
2120
2121         rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2122                                           &entry_acnt, &entry_ecnt);
2123         rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2124                                             &cntr_acnt, &cntr_ecnt);
2125         if (!entry_acnt && !cntr_acnt)
2126                 return;
2127
2128         if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2129                 seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2130                            rvu_get_pf(pcifunc));
2131         else
2132                 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2133                            rvu_get_pf(pcifunc),
2134                            (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2135
2136         if (entry_acnt) {
2137                 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2138                 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2139         }
2140         if (cntr_acnt) {
2141                 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2142                 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2143         }
2144 }
2145
2146 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
2147 {
2148         struct rvu *rvu = filp->private;
2149         int pf, vf, numvfs, blkaddr;
2150         struct npc_mcam *mcam;
2151         u16 pcifunc, counters;
2152         u64 cfg;
2153
2154         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2155         if (blkaddr < 0)
2156                 return -ENODEV;
2157
2158         mcam = &rvu->hw->mcam;
2159         counters = rvu->hw->npc_counters;
2160
2161         seq_puts(filp, "\nNPC MCAM info:\n");
2162         /* MCAM keywidth on receive and transmit sides */
2163         cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2164         cfg = (cfg >> 32) & 0x07;
2165         seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2166                    "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2167                    "224bits" : "448bits"));
2168         cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2169         cfg = (cfg >> 32) & 0x07;
2170         seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2171                    "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2172                    "224bits" : "448bits"));
2173
2174         mutex_lock(&mcam->lock);
2175         /* MCAM entries */
2176         seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2177         seq_printf(filp, "\t\t Reserved \t: %d\n",
2178                    mcam->total_entries - mcam->bmap_entries);
2179         seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2180
2181         /* MCAM counters */
2182         seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2183         seq_printf(filp, "\t\t Reserved \t: %d\n",
2184                    counters - mcam->counters.max);
2185         seq_printf(filp, "\t\t Available \t: %d\n",
2186                    rvu_rsrc_free_count(&mcam->counters));
2187
2188         if (mcam->bmap_entries == mcam->bmap_fcnt) {
2189                 mutex_unlock(&mcam->lock);
2190                 return 0;
2191         }
2192
2193         seq_puts(filp, "\n\t\t Current allocation\n");
2194         seq_puts(filp, "\t\t====================\n");
2195         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2196                 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2197                 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2198
2199                 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2200                 numvfs = (cfg >> 12) & 0xFF;
2201                 for (vf = 0; vf < numvfs; vf++) {
2202                         pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2203                         rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2204                 }
2205         }
2206
2207         mutex_unlock(&mcam->lock);
2208         return 0;
2209 }
2210
2211 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2212
2213 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2214                                              void *unused)
2215 {
2216         struct rvu *rvu = filp->private;
2217         struct npc_mcam *mcam;
2218         int blkaddr;
2219
2220         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2221         if (blkaddr < 0)
2222                 return -ENODEV;
2223
2224         mcam = &rvu->hw->mcam;
2225
2226         seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2227         seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2228                    rvu_read64(rvu, blkaddr,
2229                               NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2230
2231         return 0;
2232 }
2233
2234 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2235
2236 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
2237                                         struct rvu_npc_mcam_rule *rule)
2238 {
2239         u8 bit;
2240
2241         for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
2242                 seq_printf(s, "\t%s  ", npc_get_field_name(bit));
2243                 switch (bit) {
2244                 case NPC_DMAC:
2245                         seq_printf(s, "%pM ", rule->packet.dmac);
2246                         seq_printf(s, "mask %pM\n", rule->mask.dmac);
2247                         break;
2248                 case NPC_SMAC:
2249                         seq_printf(s, "%pM ", rule->packet.smac);
2250                         seq_printf(s, "mask %pM\n", rule->mask.smac);
2251                         break;
2252                 case NPC_ETYPE:
2253                         seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
2254                         seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
2255                         break;
2256                 case NPC_OUTER_VID:
2257                         seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
2258                         seq_printf(s, "mask 0x%x\n",
2259                                    ntohs(rule->mask.vlan_tci));
2260                         break;
2261                 case NPC_TOS:
2262                         seq_printf(s, "%d ", rule->packet.tos);
2263                         seq_printf(s, "mask 0x%x\n", rule->mask.tos);
2264                         break;
2265                 case NPC_SIP_IPV4:
2266                         seq_printf(s, "%pI4 ", &rule->packet.ip4src);
2267                         seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
2268                         break;
2269                 case NPC_DIP_IPV4:
2270                         seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
2271                         seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
2272                         break;
2273                 case NPC_SIP_IPV6:
2274                         seq_printf(s, "%pI6 ", rule->packet.ip6src);
2275                         seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
2276                         break;
2277                 case NPC_DIP_IPV6:
2278                         seq_printf(s, "%pI6 ", rule->packet.ip6dst);
2279                         seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
2280                         break;
2281                 case NPC_SPORT_TCP:
2282                 case NPC_SPORT_UDP:
2283                 case NPC_SPORT_SCTP:
2284                         seq_printf(s, "%d ", ntohs(rule->packet.sport));
2285                         seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
2286                         break;
2287                 case NPC_DPORT_TCP:
2288                 case NPC_DPORT_UDP:
2289                 case NPC_DPORT_SCTP:
2290                         seq_printf(s, "%d ", ntohs(rule->packet.dport));
2291                         seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
2292                         break;
2293                 default:
2294                         seq_puts(s, "\n");
2295                         break;
2296                 }
2297         }
2298 }
2299
2300 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2301                                          struct rvu_npc_mcam_rule *rule)
2302 {
2303         if (is_npc_intf_tx(rule->intf)) {
2304                 switch (rule->tx_action.op) {
2305                 case NIX_TX_ACTIONOP_DROP:
2306                         seq_puts(s, "\taction: Drop\n");
2307                         break;
2308                 case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2309                         seq_puts(s, "\taction: Unicast to default channel\n");
2310                         break;
2311                 case NIX_TX_ACTIONOP_UCAST_CHAN:
2312                         seq_printf(s, "\taction: Unicast to channel %d\n",
2313                                    rule->tx_action.index);
2314                         break;
2315                 case NIX_TX_ACTIONOP_MCAST:
2316                         seq_puts(s, "\taction: Multicast\n");
2317                         break;
2318                 case NIX_TX_ACTIONOP_DROP_VIOL:
2319                         seq_puts(s, "\taction: Lockdown Violation Drop\n");
2320                         break;
2321                 default:
2322                         break;
2323                 }
2324         } else {
2325                 switch (rule->rx_action.op) {
2326                 case NIX_RX_ACTIONOP_DROP:
2327                         seq_puts(s, "\taction: Drop\n");
2328                         break;
2329                 case NIX_RX_ACTIONOP_UCAST:
2330                         seq_printf(s, "\taction: Direct to queue %d\n",
2331                                    rule->rx_action.index);
2332                         break;
2333                 case NIX_RX_ACTIONOP_RSS:
2334                         seq_puts(s, "\taction: RSS\n");
2335                         break;
2336                 case NIX_RX_ACTIONOP_UCAST_IPSEC:
2337                         seq_puts(s, "\taction: Unicast ipsec\n");
2338                         break;
2339                 case NIX_RX_ACTIONOP_MCAST:
2340                         seq_puts(s, "\taction: Multicast\n");
2341                         break;
2342                 default:
2343                         break;
2344                 }
2345         }
2346 }
2347
2348 static const char *rvu_dbg_get_intf_name(int intf)
2349 {
2350         switch (intf) {
2351         case NIX_INTFX_RX(0):
2352                 return "NIX0_RX";
2353         case NIX_INTFX_RX(1):
2354                 return "NIX1_RX";
2355         case NIX_INTFX_TX(0):
2356                 return "NIX0_TX";
2357         case NIX_INTFX_TX(1):
2358                 return "NIX1_TX";
2359         default:
2360                 break;
2361         }
2362
2363         return "unknown";
2364 }
2365
2366 static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
2367 {
2368         struct rvu_npc_mcam_rule *iter;
2369         struct rvu *rvu = s->private;
2370         struct npc_mcam *mcam;
2371         int pf, vf = -1;
2372         bool enabled;
2373         int blkaddr;
2374         u16 target;
2375         u64 hits;
2376
2377         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2378         if (blkaddr < 0)
2379                 return 0;
2380
2381         mcam = &rvu->hw->mcam;
2382
2383         mutex_lock(&mcam->lock);
2384         list_for_each_entry(iter, &mcam->mcam_rules, list) {
2385                 pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2386                 seq_printf(s, "\n\tInstalled by: PF%d ", pf);
2387
2388                 if (iter->owner & RVU_PFVF_FUNC_MASK) {
2389                         vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
2390                         seq_printf(s, "VF%d", vf);
2391                 }
2392                 seq_puts(s, "\n");
2393
2394                 seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
2395                                                     "RX" : "TX");
2396                 seq_printf(s, "\tinterface: %s\n",
2397                            rvu_dbg_get_intf_name(iter->intf));
2398                 seq_printf(s, "\tmcam entry: %d\n", iter->entry);
2399
2400                 rvu_dbg_npc_mcam_show_flows(s, iter);
2401                 if (is_npc_intf_rx(iter->intf)) {
2402                         target = iter->rx_action.pf_func;
2403                         pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2404                         seq_printf(s, "\tForward to: PF%d ", pf);
2405
2406                         if (target & RVU_PFVF_FUNC_MASK) {
2407                                 vf = (target & RVU_PFVF_FUNC_MASK) - 1;
2408                                 seq_printf(s, "VF%d", vf);
2409                         }
2410                         seq_puts(s, "\n");
2411                 }
2412
2413                 rvu_dbg_npc_mcam_show_action(s, iter);
2414
2415                 enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
2416                 seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
2417
2418                 if (!iter->has_cntr)
2419                         continue;
2420                 seq_printf(s, "\tcounter: %d\n", iter->cntr);
2421
2422                 hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
2423                 seq_printf(s, "\thits: %lld\n", hits);
2424         }
2425         mutex_unlock(&mcam->lock);
2426
2427         return 0;
2428 }
2429
2430 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2431
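/* The NPC debugfs entries are read-only dumps: "mcam_info" summarizes MCAM
 * entry/counter usage, "mcam_rules" lists installed flow rules with their
 * match fields, actions and hit counts, and "rx_miss_act_stats" reports the
 * RX miss-action counter.
 */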
2432 static void rvu_dbg_npc_init(struct rvu *rvu)
2433 {
2434         rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2435
2436         debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2437                             &rvu_dbg_npc_mcam_info_fops);
2438         debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2439                             &rvu_dbg_npc_mcam_rules_fops);
2440         debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2441                             &rvu_dbg_npc_rx_miss_act_fops);
2442 }
2443
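/* Engine numbering follows CPT_AF_CONSTANTS1: SE engines occupy [0, max_ses),
 * IE engines [max_ses, max_ses + max_ies) and AE engines the range above
 * that. The FREE/BUSY bitmaps printed below are indexed relative to the
 * first engine of the requested type.
 */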
2444 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
2445 {
2446         struct cpt_ctx *ctx = filp->private;
2447         u64 busy_sts = 0, free_sts = 0;
2448         u32 e_min = 0, e_max = 0, e, i;
2449         u16 max_ses, max_ies, max_aes;
2450         struct rvu *rvu = ctx->rvu;
2451         int blkaddr = ctx->blkaddr;
2452         u64 reg;
2453
2454         reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2455         max_ses = reg & 0xffff;
2456         max_ies = (reg >> 16) & 0xffff;
2457         max_aes = (reg >> 32) & 0xffff;
2458
2459         switch (eng_type) {
2460         case CPT_AE_TYPE:
2461                 e_min = max_ses + max_ies;
2462                 e_max = max_ses + max_ies + max_aes;
2463                 break;
2464         case CPT_SE_TYPE:
2465                 e_min = 0;
2466                 e_max = max_ses;
2467                 break;
2468         case CPT_IE_TYPE:
2469                 e_min = max_ses;
2470                 e_max = max_ses + max_ies;
2471                 break;
2472         default:
2473                 return -EINVAL;
2474         }
2475
2476         for (e = e_min, i = 0; e < e_max; e++, i++) {
2477                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
2478                 if (reg & 0x1)
2479                         busy_sts |= 1ULL << i;
2480
2481                 if (reg & 0x2)
2482                         free_sts |= 1ULL << i;
2483         }
2484         seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
2485         seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
2486
2487         return 0;
2488 }
2489
2490 static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
2491 {
2492         return cpt_eng_sts_display(filp, CPT_AE_TYPE);
2493 }
2494
2495 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2496
2497 static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
2498 {
2499         return cpt_eng_sts_display(filp, CPT_SE_TYPE);
2500 }
2501
2502 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2503
2504 static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
2505 {
2506         return cpt_eng_sts_display(filp, CPT_IE_TYPE);
2507 }
2508
2509 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2510
2511 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2512 {
2513         struct cpt_ctx *ctx = filp->private;
2514         u16 max_ses, max_ies, max_aes;
2515         struct rvu *rvu = ctx->rvu;
2516         int blkaddr = ctx->blkaddr;
2517         u32 e_max, e;
2518         u64 reg;
2519
2520         reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2521         max_ses = reg & 0xffff;
2522         max_ies = (reg >> 16) & 0xffff;
2523         max_aes = (reg >> 32) & 0xffff;
2524
2525         e_max = max_ses + max_ies + max_aes;
2526
2527         seq_puts(filp, "===========================================\n");
2528         for (e = 0; e < e_max; e++) {
2529                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2530                 seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
2531                            reg & 0xff);
2532                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2533                 seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
2534                            reg);
2535                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2536                 seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
2537                            reg);
2538                 seq_puts(filp, "===========================================\n");
2539         }
2540         return 0;
2541 }
2542
2543 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2544
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
        struct cpt_ctx *ctx = filp->private;
        int blkaddr = ctx->blkaddr;
        struct rvu *rvu = ctx->rvu;
        struct rvu_block *block;
        struct rvu_hwinfo *hw;
        u64 reg;
        u32 lf;

        hw = rvu->hw;
        block = &hw->block[blkaddr];
        if (!block->lf.bmap)
                return -ENODEV;

        seq_puts(filp, "===========================================\n");
        for (lf = 0; lf < block->lf.max; lf++) {
                reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
                seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
                reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
                seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
                reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
                seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
                reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
                                (lf << block->lfshift));
                seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
                seq_puts(filp, "===========================================\n");
        }
        return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);

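/* Dump the CPT AF interrupt and error status registers */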
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
        struct cpt_ctx *ctx = filp->private;
        struct rvu *rvu = ctx->rvu;
        int blkaddr = ctx->blkaddr;
        u64 reg0, reg1;

        reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
        reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
        seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
        reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
        reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
        seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
        reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
        seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
        reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
        seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
        reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
        seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
        reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
        seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);

        return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);

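/* Dump CPT performance counters: instruction and NCB read requests and
 * latencies, UC fill reads, active cycles and the CPT clock count.
 */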
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
        struct cpt_ctx *ctx = filp->private;
        struct rvu *rvu = ctx->rvu;
        int blkaddr = ctx->blkaddr;
        u64 reg;

        reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
        seq_printf(filp, "CPT instruction requests   %llu\n", reg);
        reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
        seq_printf(filp, "CPT instruction latency    %llu\n", reg);
        reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
        seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
        reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
        seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
        reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
        seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
        reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
        seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
        reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
        seq_printf(filp, "CPT clock count pc         %llu\n", reg);

        return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);

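/* Create the "cpt" (CPT0) or "cpt1" (CPT1) debugfs directory for the given
 * block and populate it with the performance counter, engine status,
 * engine info, LF info and error info files. Does nothing if the block
 * is not implemented on this silicon.
 */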
static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
{
        struct cpt_ctx *ctx;

        if (!is_block_implemented(rvu->hw, blkaddr))
                return;

        if (blkaddr == BLKADDR_CPT0) {
                rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
                ctx = &rvu->rvu_dbg.cpt_ctx[0];
                ctx->blkaddr = BLKADDR_CPT0;
                ctx->rvu = rvu;
        } else {
                rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
                                                      rvu->rvu_dbg.root);
                ctx = &rvu->rvu_dbg.cpt_ctx[1];
                ctx->blkaddr = BLKADDR_CPT1;
                ctx->rvu = rvu;
        }

        debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
                            &rvu_dbg_cpt_pc_fops);
        debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
                            &rvu_dbg_cpt_ae_sts_fops);
        debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
                            &rvu_dbg_cpt_se_sts_fops);
        debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
                            &rvu_dbg_cpt_ie_sts_fops);
        debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
                            &rvu_dbg_cpt_engines_info_fops);
        debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
                            &rvu_dbg_cpt_lfs_info_fops);
        debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
                            &rvu_dbg_cpt_err_info_fops);
}

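/* Name the debugfs root after the silicon family: "cn10k" or "octeontx2" */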
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
        if (!is_rvu_otx2(rvu))
                return "cn10k";
        else
                return "octeontx2";
}

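/* Create the RVU debugfs hierarchy: the root directory, the resource
 * allocation and PF-to-CGX/RPM map files, and the per-block (NPA, NIX,
 * CGX, NPC and CPT) sub-directories.
 */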
void rvu_dbg_init(struct rvu *rvu)
{
        rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);

        debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
                            &rvu_dbg_rsrc_status_fops);

        if (!cgx_get_cgxcnt_max())
                goto create;

        if (is_rvu_otx2(rvu))
                debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
                                    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
        else
                debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
                                    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);

create:
        rvu_dbg_npa_init(rvu);
        rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
        rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
        rvu_dbg_cgx_init(rvu);
        rvu_dbg_npc_init(rvu);
        rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
        rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
}

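/* Remove the entire RVU debugfs hierarchy */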
void rvu_dbg_exit(struct rvu *rvu)
{
        debugfs_remove_recursive(rvu->rvu_dbg.root);
}

#endif /* CONFIG_DEBUG_FS */