1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3 *
4 * Copyright (C) 2019 Marvell International Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #ifdef CONFIG_DEBUG_FS
12
13 #include <linux/fs.h>
14 #include <linux/debugfs.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17
18 #include "rvu_struct.h"
19 #include "rvu_reg.h"
20 #include "rvu.h"
21 #include "cgx.h"
22 #include "lmac_common.h"
23 #include "npc.h"
24
25 #define DEBUGFS_DIR_NAME "octeontx2"
26
/* Generic CGX statistics register indices (CGXX_CMRX_RX/TX_STAT<n>);
 * used as designated-initializer indices into the RX/TX name tables below.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};

/* NIX TX stats */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,
};

/* NIX RX stats */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,
};
75
/* Human-readable names for the CGX RX statistics, indexed by stat number.
 * Only stats 0-12 exist on the RX side, so the table is shorter than the
 * CGX_STAT enum.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
91
92 static char *cgx_tx_stats_fields[] = {
93 [CGX_STAT0] = "Packets dropped due to excessive collisions",
94 [CGX_STAT1] = "Packets dropped due to excessive deferral",
95 [CGX_STAT2] = "Multiple collisions before successful transmission",
96 [CGX_STAT3] = "Single collisions before successful transmission",
97 [CGX_STAT4] = "Total octets sent on the interface",
98 [CGX_STAT5] = "Total frames sent on the interface",
99 [CGX_STAT6] = "Packets sent with an octet count < 64",
100 [CGX_STAT7] = "Packets sent with an octet count == 64",
101 [CGX_STAT8] = "Packets sent with an octet count of 65–127",
102 [CGX_STAT9] = "Packets sent with an octet count of 128-255",
103 [CGX_STAT10] = "Packets sent with an octet count of 256-511",
104 [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
105 [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
106 [CGX_STAT13] = "Packets sent with an octet count of > 1518",
107 [CGX_STAT14] = "Packets sent to a broadcast DMAC",
108 [CGX_STAT15] = "Packets sent to the multicast DMAC",
109 [CGX_STAT16] = "Transmit underflow and were truncated",
110 [CGX_STAT17] = "Control/PAUSE packets sent",
111 };
112
/* Human-readable names for RPM MAC RX statistics, indexed by stat number.
 * Fixed here: "with out error" -> "without error", mis-encoded en dash in
 * the 65-127 range string, and "a1nrange" -> "in-range" (the IEEE 802.3
 * aInRangeLengthErrors counter).
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with in-range length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
158
/* Human-readable names for RPM MAC TX statistics, indexed by stat number.
 * Fixed here: mis-encoded en dash in the 65-127 range string.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
195
/* CPT (crypto accelerator) engine types: AE = asymmetric, SE = symmetric,
 * IE = IPsec inline engines.
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
201
/* Number of banks in an NDC block, read from its NDC_AF_CONST register */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Let RVU_DEBUG_*FOPS(name, read, NULL) expand to a NULL handler */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Define fops for a seq_file based debugfs entry: reads go through
 * single_open()/seq_read(); the write op is optional (may be NULL).
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Define fops for a raw (non-seq_file) debugfs entry */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}

static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
231
232 /* Dumps current provisioning status of all RVU block LFs */
rvu_dbg_rsrc_attach_status(struct file * filp,char __user * buffer,size_t count,loff_t * ppos)233 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
234 char __user *buffer,
235 size_t count, loff_t *ppos)
236 {
237 int index, off = 0, flag = 0, go_back = 0, len = 0;
238 struct rvu *rvu = filp->private_data;
239 int lf, pf, vf, pcifunc;
240 struct rvu_block block;
241 int bytes_not_copied;
242 int lf_str_size = 12;
243 int buf_size = 2048;
244 char *lfs;
245 char *buf;
246
247 /* don't allow partial reads */
248 if (*ppos != 0)
249 return 0;
250
251 buf = kzalloc(buf_size, GFP_KERNEL);
252 if (!buf)
253 return -ENOSPC;
254
255 lfs = kzalloc(lf_str_size, GFP_KERNEL);
256 if (!lfs) {
257 kfree(buf);
258 return -ENOMEM;
259 }
260 off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
261 "pcifunc");
262 for (index = 0; index < BLK_COUNT; index++)
263 if (strlen(rvu->hw->block[index].name)) {
264 off += scnprintf(&buf[off], buf_size - 1 - off,
265 "%-*s", lf_str_size,
266 rvu->hw->block[index].name);
267 }
268 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
269 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
270 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
271 pcifunc = pf << 10 | vf;
272 if (!pcifunc)
273 continue;
274
275 if (vf) {
276 sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
277 go_back = scnprintf(&buf[off],
278 buf_size - 1 - off,
279 "%-*s", lf_str_size, lfs);
280 } else {
281 sprintf(lfs, "PF%d", pf);
282 go_back = scnprintf(&buf[off],
283 buf_size - 1 - off,
284 "%-*s", lf_str_size, lfs);
285 }
286
287 off += go_back;
288 for (index = 0; index < BLKTYPE_MAX; index++) {
289 block = rvu->hw->block[index];
290 if (!strlen(block.name))
291 continue;
292 len = 0;
293 lfs[len] = '\0';
294 for (lf = 0; lf < block.lf.max; lf++) {
295 if (block.fn_map[lf] != pcifunc)
296 continue;
297 flag = 1;
298 len += sprintf(&lfs[len], "%d,", lf);
299 }
300
301 if (flag)
302 len--;
303 lfs[len] = '\0';
304 off += scnprintf(&buf[off], buf_size - 1 - off,
305 "%-*s", lf_str_size, lfs);
306 if (!strlen(lfs))
307 go_back += lf_str_size;
308 }
309 if (!flag)
310 off -= go_back;
311 else
312 flag = 0;
313 off--;
314 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
315 }
316 }
317
318 bytes_not_copied = copy_to_user(buffer, buf, off);
319 kfree(lfs);
320 kfree(buf);
321
322 if (bytes_not_copied)
323 return -EFAULT;
324
325 *ppos = off;
326 return off;
327 }
328
329 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
330
rvu_dbg_rvu_pf_cgx_map_display(struct seq_file * filp,void * unused)331 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
332 {
333 struct rvu *rvu = filp->private;
334 struct pci_dev *pdev = NULL;
335 struct mac_ops *mac_ops;
336 char cgx[10], lmac[10];
337 struct rvu_pfvf *pfvf;
338 int pf, domain, blkid;
339 u8 cgx_id, lmac_id;
340 u16 pcifunc;
341
342 domain = 2;
343 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
344 /* There can be no CGX devices at all */
345 if (!mac_ops)
346 return 0;
347 seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
348 mac_ops->name);
349 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
350 if (!is_pf_cgxmapped(rvu, pf))
351 continue;
352
353 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
354 if (!pdev)
355 continue;
356
357 cgx[0] = 0;
358 lmac[0] = 0;
359 pcifunc = pf << 10;
360 pfvf = rvu_get_pfvf(rvu, pcifunc);
361
362 if (pfvf->nix_blkaddr == BLKADDR_NIX0)
363 blkid = 0;
364 else
365 blkid = 1;
366
367 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
368 &lmac_id);
369 sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
370 sprintf(lmac, "LMAC%d", lmac_id);
371 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
372 dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
373 }
374 return 0;
375 }
376
377 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
378
rvu_dbg_is_valid_lf(struct rvu * rvu,int blkaddr,int lf,u16 * pcifunc)379 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
380 u16 *pcifunc)
381 {
382 struct rvu_block *block;
383 struct rvu_hwinfo *hw;
384
385 hw = rvu->hw;
386 block = &hw->block[blkaddr];
387
388 if (lf < 0 || lf >= block->lf.max) {
389 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
390 block->lf.max - 1);
391 return false;
392 }
393
394 *pcifunc = block->fn_map[lf];
395 if (!*pcifunc) {
396 dev_warn(rvu->dev,
397 "This LF is not attached to any RVU PFFUNC\n");
398 return false;
399 }
400 return true;
401 }
402
print_npa_qsize(struct seq_file * m,struct rvu_pfvf * pfvf)403 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
404 {
405 char *buf;
406
407 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
408 if (!buf)
409 return;
410
411 if (!pfvf->aura_ctx) {
412 seq_puts(m, "Aura context is not initialized\n");
413 } else {
414 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
415 pfvf->aura_ctx->qsize);
416 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
417 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
418 }
419
420 if (!pfvf->pool_ctx) {
421 seq_puts(m, "Pool context is not initialized\n");
422 } else {
423 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
424 pfvf->pool_ctx->qsize);
425 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
426 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
427 }
428 kfree(buf);
429 }
430
/* The 'qsize' entry dumps current Aura/Pool context Qsize
 * and each context's current enable/disable status in a bitmap.
 * The LF to dump was previously selected via rvu_dbg_qsize_write().
 */
static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
				 int blktype)
{
	void (*print_qsize)(struct seq_file *filp,
			    struct rvu_pfvf *pfvf) = NULL;
	struct dentry *current_dir;
	struct rvu_pfvf *pfvf;
	struct rvu *rvu;
	int qsize_id;
	u16 pcifunc;
	int blkaddr;

	rvu = filp->private;
	switch (blktype) {
	case BLKTYPE_NPA:
		qsize_id = rvu->rvu_dbg.npa_qsize_id;
		print_qsize = print_npa_qsize;
		break;

	case BLKTYPE_NIX:
		qsize_id = rvu->rvu_dbg.nix_qsize_id;
		print_qsize = print_nix_qsize;
		break;

	default:
		return -EINVAL;
	}

	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
	} else {
		/* NIX0 and NIX1 share this handler; which block is meant
		 * is derived from the debugfs parent directory name.
		 */
		current_dir = filp->file->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
			   BLKADDR_NIX1 : BLKADDR_NIX0);
	}

	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	print_qsize(filp, pfvf);

	return 0;
}
478
rvu_dbg_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int blktype)479 static ssize_t rvu_dbg_qsize_write(struct file *filp,
480 const char __user *buffer, size_t count,
481 loff_t *ppos, int blktype)
482 {
483 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
484 struct seq_file *seqfile = filp->private_data;
485 char *cmd_buf, *cmd_buf_tmp, *subtoken;
486 struct rvu *rvu = seqfile->private;
487 struct dentry *current_dir;
488 int blkaddr;
489 u16 pcifunc;
490 int ret, lf;
491
492 cmd_buf = memdup_user(buffer, count + 1);
493 if (IS_ERR(cmd_buf))
494 return -ENOMEM;
495
496 cmd_buf[count] = '\0';
497
498 cmd_buf_tmp = strchr(cmd_buf, '\n');
499 if (cmd_buf_tmp) {
500 *cmd_buf_tmp = '\0';
501 count = cmd_buf_tmp - cmd_buf + 1;
502 }
503
504 cmd_buf_tmp = cmd_buf;
505 subtoken = strsep(&cmd_buf, " ");
506 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
507 if (cmd_buf)
508 ret = -EINVAL;
509
510 if (!strncmp(subtoken, "help", 4) || ret < 0) {
511 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
512 goto qsize_write_done;
513 }
514
515 if (blktype == BLKTYPE_NPA) {
516 blkaddr = BLKADDR_NPA;
517 } else {
518 current_dir = filp->f_path.dentry->d_parent;
519 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
520 BLKADDR_NIX1 : BLKADDR_NIX0);
521 }
522
523 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
524 ret = -EINVAL;
525 goto qsize_write_done;
526 }
527 if (blktype == BLKTYPE_NPA)
528 rvu->rvu_dbg.npa_qsize_id = lf;
529 else
530 rvu->rvu_dbg.nix_qsize_id = lf;
531
532 qsize_write_done:
533 kfree(cmd_buf_tmp);
534 return ret ? ret : count;
535 }
536
/* debugfs 'qsize' write handler for the NPA block */
static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NPA);
}

/* debugfs 'qsize' read handler for the NPA block */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}

RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
551
/* Dumps given NPA Aura's context, one line per field, grouped by the
 * context word (W0-W6) the field lives in. Fields that only exist on
 * CN10K (fc_be, fc_msh_dst) are skipped on OcteonTx2 silicon.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be exists only on CN10K (not OcteonTx2) */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst exists only on CN10K */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
599
/* Dumps given NPA Pool's context, one line per field, grouped by the
 * context word (W0-W8) the field lives in. CN10K-only fields (fc_be,
 * fc_msh_dst) are skipped on OcteonTx2 silicon.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be exists only on CN10K (not OcteonTx2) */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst exists only on CN10K */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
646
/* Reads aura/pool's ctx from admin queue and dumps it.
 *
 * The LF / context id / "all" selection was stored earlier by
 * rvu_dbg_npa_ctx_write(). When "all" was requested, every context of
 * the LF (0 .. qsize-1) is read and printed; otherwise just the one id.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an admin-queue READ instruction for this PCIFUNC/ctype */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			   max_id - 1);
		return -EINVAL;
	}

	/* "all": iterate 0..max_id-1; otherwise dump the single id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			   aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
727
/* Validate and record which NPA LF and aura/pool id (or "all") the next
 * context-display read should dump.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
		break;
	case NPA_AQ_CTYPE_POOL:
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
		break;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			 max_id - 1);
		return -EINVAL;
	}

	if (ctype == NPA_AQ_CTYPE_AURA) {
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
	} else if (ctype == NPA_AQ_CTYPE_POOL) {
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
	} else {
		return -EINVAL;
	}
	return 0;
}
778
/* Parse a "<npalf> [<id>|all]" command copied from userspace.
 * On success *npalf (and *id or *all) are filled in; *count is shrunk
 * to the bytes actually consumed when the input had a newline.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	char *nl, *token;
	int ret;

	if (copy_from_user(cmd_buf, buffer, *count))
		return -EFAULT;

	cmd_buf[*count] = '\0';

	/* Trim at the first newline */
	nl = strchr(cmd_buf, '\n');
	if (nl) {
		*nl = '\0';
		*count = nl - cmd_buf + 1;
	}

	token = strsep(&cmd_buf, " ");
	ret = token ? kstrtoint(token, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;

	token = strsep(&cmd_buf, " ");
	if (token && strcmp(token, "all") == 0) {
		*all = true;
	} else {
		ret = token ? kstrtoint(token, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}

	/* Any leftover text after the second token is invalid */
	return cmd_buf ? -EINVAL : ret;
}
816
rvu_dbg_npa_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)817 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
818 const char __user *buffer,
819 size_t count, loff_t *ppos, int ctype)
820 {
821 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
822 "aura" : "pool";
823 struct seq_file *seqfp = filp->private_data;
824 struct rvu *rvu = seqfp->private;
825 int npalf, id = 0, ret;
826 bool all = false;
827
828 if ((*ppos != 0) || !count)
829 return -EINVAL;
830
831 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
832 if (!cmd_buf)
833 return count;
834 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
835 &npalf, &id, &all);
836 if (ret < 0) {
837 dev_info(rvu->dev,
838 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
839 ctype_string, ctype_string);
840 goto done;
841 } else {
842 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
843 }
844 done:
845 kfree(cmd_buf);
846 return ret ? ret : count;
847 }
848
/* aura_ctx write handler: thin wrapper selecting the AURA context type */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}

/* aura_ctx read handler: dumps the previously selected aura context(s) */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);

/* pool_ctx write handler: thin wrapper selecting the POOL context type */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_POOL);
}

/* pool_ctx read handler: dumps the previously selected pool context(s) */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
878
ndc_cache_stats(struct seq_file * s,int blk_addr,int ctype,int transaction)879 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
880 int ctype, int transaction)
881 {
882 u64 req, out_req, lat, cant_alloc;
883 struct nix_hw *nix_hw;
884 struct rvu *rvu;
885 int port;
886
887 if (blk_addr == BLKADDR_NDC_NPA0) {
888 rvu = s->private;
889 } else {
890 nix_hw = s->private;
891 rvu = nix_hw->rvu;
892 }
893
894 for (port = 0; port < NDC_MAX_PORT; port++) {
895 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
896 (port, ctype, transaction));
897 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
898 (port, ctype, transaction));
899 out_req = rvu_read64(rvu, blk_addr,
900 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
901 (port, ctype, transaction));
902 cant_alloc = rvu_read64(rvu, blk_addr,
903 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
904 (port, transaction));
905 seq_printf(s, "\nPort:%d\n", port);
906 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
907 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
908 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
909 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
910 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
911 }
912 }
913
/* Dump NDC stats for all four mode/direction combinations of one block.
 * NOTE(review): the 'idx' parameter is currently unused here — confirm
 * whether it is kept for interface symmetry with other ndc_blk_* helpers.
 */
static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
{
	seq_puts(s, "\n***** CACHE mode read stats *****\n");
	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
	seq_puts(s, "\n***** CACHE mode write stats *****\n");
	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
	return 0;
}

/* NPA NDC cache stats debugfs read handler */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
933
/* Dump per-bank hit/miss counters of one NDC block. The number of banks
 * comes from the block's NDC_AF_CONST register.
 * NOTE(review): the 'idx' parameter is unused — confirm intent.
 */
static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
{
	struct nix_hw *nix_hw;
	struct rvu *rvu;
	int bank, max_bank;

	/* NPA NDC entries pass rvu directly; NIX NDC entries pass nix_hw */
	if (blk_addr == BLKADDR_NDC_NPA0) {
		rvu = s->private;
	} else {
		nix_hw = s->private;
		rvu = nix_hw->rvu;
	}

	max_bank = NDC_MAX_BANK(rvu, blk_addr);
	for (bank = 0; bank < max_bank; bank++) {
		seq_printf(s, "BANK:%d\n", bank);
		seq_printf(s, "\tHits:\t%lld\n",
			   (u64)rvu_read64(rvu, blk_addr,
			   NDC_AF_BANKX_HIT_PC(bank)));
		seq_printf(s, "\tMiss:\t%lld\n",
			   (u64)rvu_read64(rvu, blk_addr,
			    NDC_AF_BANKX_MISS_PC(bank)));
	}
	return 0;
}
959
rvu_dbg_nix_ndc_rx_cache_display(struct seq_file * filp,void * unused)960 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
961 {
962 struct nix_hw *nix_hw = filp->private;
963 int blkaddr = 0;
964 int ndc_idx = 0;
965
966 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
967 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
968 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
969
970 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
971 }
972
973 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
974
rvu_dbg_nix_ndc_tx_cache_display(struct seq_file * filp,void * unused)975 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
976 {
977 struct nix_hw *nix_hw = filp->private;
978 int blkaddr = 0;
979 int ndc_idx = 0;
980
981 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
982 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
983 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
984
985 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
986 }
987
988 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
989
/* debugfs read handler: dump per-bank hit/miss counters for the NPA NDC. */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
997
rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file * filp,void * unused)998 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
999 void *unused)
1000 {
1001 struct nix_hw *nix_hw = filp->private;
1002 int ndc_idx = NPA0_U;
1003 int blkaddr = 0;
1004
1005 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1006 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1007
1008 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1009 }
1010
1011 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1012
rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file * filp,void * unused)1013 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1014 void *unused)
1015 {
1016 struct nix_hw *nix_hw = filp->private;
1017 int ndc_idx = NPA0_U;
1018 int blkaddr = 0;
1019
1020 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1021 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1022
1023 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1024 }
1025
1026 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1027
print_nix_cn10k_sq_ctx(struct seq_file * m,struct nix_cn10k_sq_ctx_s * sq_ctx)1028 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1029 struct nix_cn10k_sq_ctx_s *sq_ctx)
1030 {
1031 seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1032 sq_ctx->ena, sq_ctx->qint_idx);
1033 seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1034 sq_ctx->substream, sq_ctx->sdp_mcast);
1035 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1036 sq_ctx->cq, sq_ctx->sqe_way_mask);
1037
1038 seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1039 sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1040 seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1041 sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1042 seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1043 sq_ctx->default_chan, sq_ctx->sqb_count);
1044
1045 seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1046 seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1047 seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1048 sq_ctx->sqb_aura, sq_ctx->sq_int);
1049 seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1050 sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1051
1052 seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1053 sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1054 seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1055 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1056 seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1057 sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1058 seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1059 sq_ctx->tail_offset, sq_ctx->smenq_offset);
1060 seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1061 sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1062
1063 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1064 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1065 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1066 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1067 sq_ctx->smenq_next_sqb);
1068
1069 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1070
1071 seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1072 seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1073 sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1074 seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1075 sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1076 seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1077 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1078
1079 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1080 (u64)sq_ctx->scm_lso_rem);
1081 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1082 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1083 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1084 (u64)sq_ctx->dropped_octs);
1085 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1086 (u64)sq_ctx->dropped_pkts);
1087 }
1088
/* Dumps given nix_sq's context (word by word) to the debugfs seq_file.
 * On CN10K silicon the layout differs, so delegate to the CN10K printer;
 * the response buffer is re-interpreted as the CN10K context structure.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* !is_rvu_otx2() means CN10K — different SQ context layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}

	/* Word 0 */
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	/* Word 1 */
	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	/* Word 2 */
	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	/* Word 3 */
	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* Words 4-8: SQB pointers */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	/* Word 9: LSO state */
	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* Words 10-15: statistics */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1158
print_nix_cn10k_rq_ctx(struct seq_file * m,struct nix_cn10k_rq_ctx_s * rq_ctx)1159 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1160 struct nix_cn10k_rq_ctx_s *rq_ctx)
1161 {
1162 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1163 rq_ctx->ena, rq_ctx->sso_ena);
1164 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1165 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1166 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1167 rq_ctx->cq, rq_ctx->lenerr_dis);
1168 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1169 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1170 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1171 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1172 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1173 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1174 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1175
1176 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1177 rq_ctx->spb_aura, rq_ctx->lpb_aura);
1178 seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1179 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1180 rq_ctx->sso_grp, rq_ctx->sso_tt);
1181 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1182 rq_ctx->pb_caching, rq_ctx->wqe_caching);
1183 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1184 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1185 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1186 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1187 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1188 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1189
1190 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1191 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1192 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1193 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1194 rq_ctx->wqe_skip, rq_ctx->spb_ena);
1195 seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1196 rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1197 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1198 rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1199 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1200 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1201
1202 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1203 rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1204 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1205 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1206 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1207 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1208 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1209 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1210
1211 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1212 rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1213 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1214 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1215 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1216 rq_ctx->rq_int, rq_ctx->rq_int_ena);
1217 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1218
1219 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1220 rq_ctx->ltag, rq_ctx->good_utag);
1221 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1222 rq_ctx->bad_utag, rq_ctx->flow_tagw);
1223 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1224 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1225 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1226 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1227 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1228
1229 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1230 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1231 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1232 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1233 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1234 }
1235
/* Dumps given nix_rq's context (word by word) to the debugfs seq_file.
 * On CN10K silicon the layout differs, so delegate to the CN10K printer;
 * the response buffer is re-interpreted as the CN10K context structure.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* !is_rvu_otx2() means CN10K — different RQ context layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	/* Word 0 */
	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	/* Word 1 */
	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	/* Word 2 */
	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	/* Word 3: pass/drop levels */
	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	/* Word 4 */
	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	/* Word 5: tagging */
	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* Words 6-10: statistics */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1304
/* Dumps given nix_cq's context (word by word) to the debugfs seq_file.
 * NOTE(review): some labels lack a space after "W2:"/"W3:" — output text
 * is left unchanged in case consumers parse it.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1335
/* Common debugfs read handler for NIX sq_ctx/rq_ctx/cq_ctx files.
 *
 * Reads back the queue context(s) previously selected via the matching
 * write handler (stored in rvu->rvu_dbg.nix_*_ctx) through the NIX AQ
 * and pretty-prints them with the ctype-specific printer.
 *
 * @ctype: NIX_AQ_CTYPE_CQ/SQ/RQ selector.
 * Returns 0 on success, -EINVAL on bad state or AQ read failure.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the selection stored by the corresponding write handler */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick queue-count limit and the ctype-specific pretty-printer */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps every queue of the LF, otherwise just the chosen id */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1424
/* Validate a parsed queue-context selection and store it for later reads.
 *
 * Checks that @nixlf is a valid LF of this NIX block, that the requested
 * ctype's contexts are initialized, and that @id is within the queue
 * count; on success records lf/id/all in rvu->rvu_dbg.nix_*_ctx so the
 * display handler knows what to dump.
 *
 * Returns 0 on success, -EINVAL on any validation failure.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	if (ctype == NIX_AQ_CTYPE_SQ) {
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
	}

	/* For an unknown ctype max_id stays 0, so this also rejects it */
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1487
rvu_dbg_nix_queue_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)1488 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1489 const char __user *buffer,
1490 size_t count, loff_t *ppos,
1491 int ctype)
1492 {
1493 struct seq_file *m = filp->private_data;
1494 struct nix_hw *nix_hw = m->private;
1495 struct rvu *rvu = nix_hw->rvu;
1496 char *cmd_buf, *ctype_string;
1497 int nixlf, id = 0, ret;
1498 bool all = false;
1499
1500 if ((*ppos != 0) || !count)
1501 return -EINVAL;
1502
1503 switch (ctype) {
1504 case NIX_AQ_CTYPE_SQ:
1505 ctype_string = "sq";
1506 break;
1507 case NIX_AQ_CTYPE_RQ:
1508 ctype_string = "rq";
1509 break;
1510 case NIX_AQ_CTYPE_CQ:
1511 ctype_string = "cq";
1512 break;
1513 default:
1514 return -EINVAL;
1515 }
1516
1517 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1518
1519 if (!cmd_buf)
1520 return count;
1521
1522 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1523 &nixlf, &id, &all);
1524 if (ret < 0) {
1525 dev_info(rvu->dev,
1526 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1527 ctype_string, ctype_string);
1528 goto done;
1529 } else {
1530 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1531 ctype_string, m);
1532 }
1533 done:
1534 kfree(cmd_buf);
1535 return ret ? ret : count;
1536 }
1537
/* debugfs write handler for .../nix/sq_ctx: select SQ context(s) to dump. */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_SQ);
}

/* debugfs read handler for .../nix/sq_ctx: dump the selected SQ context(s). */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1552
/* debugfs write handler for .../nix/rq_ctx: select RQ context(s) to dump. */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_RQ);
}

/* debugfs read handler for .../nix/rq_ctx: dump the selected RQ context(s). */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
}

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1567
/* debugfs write handler for .../nix/cq_ctx: select CQ context(s) to dump. */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_CQ);
}

/* debugfs read handler for .../nix/cq_ctx: dump the selected CQ context(s). */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1582
print_nix_qctx_qsize(struct seq_file * filp,int qsize,unsigned long * bmap,char * qtype)1583 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1584 unsigned long *bmap, char *qtype)
1585 {
1586 char *buf;
1587
1588 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1589 if (!buf)
1590 return;
1591
1592 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1593 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1594 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1595 qtype, buf);
1596 kfree(buf);
1597 }
1598
print_nix_qsize(struct seq_file * filp,struct rvu_pfvf * pfvf)1599 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1600 {
1601 if (!pfvf->cq_ctx)
1602 seq_puts(filp, "cq context is not initialized\n");
1603 else
1604 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1605 "cq");
1606
1607 if (!pfvf->rq_ctx)
1608 seq_puts(filp, "rq context is not initialized\n");
1609 else
1610 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1611 "rq");
1612
1613 if (!pfvf->sq_ctx)
1614 seq_puts(filp, "sq context is not initialized\n");
1615 else
1616 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1617 "sq");
1618 }
1619
/* debugfs write handler for .../nix/qsize: select the NIX LF to report. */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}

/* debugfs read handler for .../nix/qsize: dump queue sizes of that LF. */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1634
rvu_dbg_nix_init(struct rvu * rvu,int blkaddr)1635 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1636 {
1637 struct nix_hw *nix_hw;
1638
1639 if (!is_block_implemented(rvu->hw, blkaddr))
1640 return;
1641
1642 if (blkaddr == BLKADDR_NIX0) {
1643 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1644 nix_hw = &rvu->hw->nix[0];
1645 } else {
1646 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1647 rvu->rvu_dbg.root);
1648 nix_hw = &rvu->hw->nix[1];
1649 }
1650
1651 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1652 &rvu_dbg_nix_sq_ctx_fops);
1653 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1654 &rvu_dbg_nix_rq_ctx_fops);
1655 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1656 &rvu_dbg_nix_cq_ctx_fops);
1657 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1658 &rvu_dbg_nix_ndc_tx_cache_fops);
1659 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1660 &rvu_dbg_nix_ndc_rx_cache_fops);
1661 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1662 &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1663 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1664 &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1665 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1666 &rvu_dbg_nix_qsize_fops);
1667 }
1668
rvu_dbg_npa_init(struct rvu * rvu)1669 static void rvu_dbg_npa_init(struct rvu *rvu)
1670 {
1671 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1672
1673 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1674 &rvu_dbg_npa_qsize_fops);
1675 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1676 &rvu_dbg_npa_aura_ctx_fops);
1677 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1678 &rvu_dbg_npa_pool_ctx_fops);
1679 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
1680 &rvu_dbg_npa_ndc_cache_fops);
1681 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
1682 &rvu_dbg_npa_ndc_hits_miss_fops);
1683 }
1684
/* Fetch a cumulative NIX RX stat for (cgxd, lmac_id), print it on success,
 * and evaluate to the counter value. Sets the enclosing scope's 'err';
 * callers must check it after each use.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
({									\
	u64 cnt;							\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),		\
				     NIX_STATS_RX, &(cnt));		\
	if (!err)							\
		seq_printf(s, "%s: %llu\n", name, cnt);			\
	cnt;								\
})

/* Same as above for the NIX TX direction. */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)				\
({									\
	u64 cnt;							\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),		\
				     NIX_STATS_TX, &(cnt));		\
	if (!err)							\
		seq_printf(s, "%s: %llu\n", name, cnt);			\
	cnt;								\
})
1704
cgx_print_stats(struct seq_file * s,int lmac_id)1705 static int cgx_print_stats(struct seq_file *s, int lmac_id)
1706 {
1707 struct cgx_link_user_info linfo;
1708 struct mac_ops *mac_ops;
1709 void *cgxd = s->private;
1710 u64 ucast, mcast, bcast;
1711 int stat = 0, err = 0;
1712 u64 tx_stat, rx_stat;
1713 struct rvu *rvu;
1714
1715 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1716 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1717 if (!rvu)
1718 return -ENODEV;
1719
1720 mac_ops = get_mac_ops(cgxd);
1721
1722 if (!mac_ops)
1723 return 0;
1724
1725 /* Link status */
1726 seq_puts(s, "\n=======Link Status======\n\n");
1727 err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1728 if (err)
1729 seq_puts(s, "Failed to read link status\n");
1730 seq_printf(s, "\nLink is %s %d Mbps\n\n",
1731 linfo.link_up ? "UP" : "DOWN", linfo.speed);
1732
1733 /* Rx stats */
1734 seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
1735 mac_ops->name);
1736 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1737 if (err)
1738 return err;
1739 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1740 if (err)
1741 return err;
1742 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1743 if (err)
1744 return err;
1745 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1746 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1747 if (err)
1748 return err;
1749 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1750 if (err)
1751 return err;
1752 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
1753 if (err)
1754 return err;
1755
1756 /* Tx stats */
1757 seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
1758 mac_ops->name);
1759 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
1760 if (err)
1761 return err;
1762 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
1763 if (err)
1764 return err;
1765 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
1766 if (err)
1767 return err;
1768 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
1769 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
1770 if (err)
1771 return err;
1772 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
1773 if (err)
1774 return err;
1775
1776 /* Rx stats */
1777 seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
1778 while (stat < mac_ops->rx_stats_cnt) {
1779 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
1780 if (err)
1781 return err;
1782 if (is_rvu_otx2(rvu))
1783 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
1784 rx_stat);
1785 else
1786 seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
1787 rx_stat);
1788 stat++;
1789 }
1790
1791 /* Tx stats */
1792 stat = 0;
1793 seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
1794 while (stat < mac_ops->tx_stats_cnt) {
1795 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
1796 if (err)
1797 return err;
1798
1799 if (is_rvu_otx2(rvu))
1800 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
1801 tx_stat);
1802 else
1803 seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
1804 tx_stat);
1805 stat++;
1806 }
1807
1808 return err;
1809 }
1810
rvu_dbg_cgx_stat_display(struct seq_file * filp,void * unused)1811 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
1812 {
1813 struct dentry *current_dir;
1814 int err, lmac_id;
1815 char *buf;
1816
1817 current_dir = filp->file->f_path.dentry->d_parent;
1818 buf = strrchr(current_dir->d_name.name, 'c');
1819 if (!buf)
1820 return -EINVAL;
1821
1822 err = kstrtoint(buf + 1, 10, &lmac_id);
1823 if (!err) {
1824 err = cgx_print_stats(filp, lmac_id);
1825 if (err)
1826 return err;
1827 }
1828 return err;
1829 }
1830
1831 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
1832
rvu_dbg_cgx_init(struct rvu * rvu)1833 static void rvu_dbg_cgx_init(struct rvu *rvu)
1834 {
1835 struct mac_ops *mac_ops;
1836 unsigned long lmac_bmap;
1837 int i, lmac_id;
1838 char dname[20];
1839 void *cgx;
1840
1841 if (!cgx_get_cgxcnt_max())
1842 return;
1843
1844 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
1845 if (!mac_ops)
1846 return;
1847
1848 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
1849 rvu->rvu_dbg.root);
1850
1851 for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
1852 cgx = rvu_cgx_pdata(i, rvu);
1853 if (!cgx)
1854 continue;
1855 lmac_bmap = cgx_get_lmac_bmap(cgx);
1856 /* cgx debugfs dir */
1857 sprintf(dname, "%s%d", mac_ops->name, i);
1858 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
1859 rvu->rvu_dbg.cgx_root);
1860
1861 for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
1862 /* lmac debugfs dir */
1863 sprintf(dname, "lmac%d", lmac_id);
1864 rvu->rvu_dbg.lmac =
1865 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
1866
1867 debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
1868 cgx, &rvu_dbg_cgx_stat_fops);
1869 }
1870 }
1871 }
1872
1873 /* NPC debugfs APIs */
rvu_print_npc_mcam_info(struct seq_file * s,u16 pcifunc,int blkaddr)1874 static void rvu_print_npc_mcam_info(struct seq_file *s,
1875 u16 pcifunc, int blkaddr)
1876 {
1877 struct rvu *rvu = s->private;
1878 int entry_acnt, entry_ecnt;
1879 int cntr_acnt, cntr_ecnt;
1880
1881 /* Skip PF0 */
1882 if (!pcifunc)
1883 return;
1884 rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
1885 &entry_acnt, &entry_ecnt);
1886 rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
1887 &cntr_acnt, &cntr_ecnt);
1888 if (!entry_acnt && !cntr_acnt)
1889 return;
1890
1891 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
1892 seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
1893 rvu_get_pf(pcifunc));
1894 else
1895 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
1896 rvu_get_pf(pcifunc),
1897 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1898
1899 if (entry_acnt) {
1900 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
1901 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
1902 }
1903 if (cntr_acnt) {
1904 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
1905 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
1906 }
1907 }
1908
/* Dump an NPC MCAM usage summary: RX/TX key widths, entry/counter
 * totals, and the per-PF/per-VF allocation breakdown.
 *
 * Returns 0 on success, -ENODEV if the NPC block is not present.
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	/* Hold the MCAM lock while reading bitmap/counter bookkeeping */
	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* All bitmap entries still free: no per-function allocations yet */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* Number of VFs taken from bits 19:12 of RVU_PRIV_PFX_CFG */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
1975
rvu_dbg_npc_rx_miss_stats_display(struct seq_file * filp,void * unused)1976 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
1977 void *unused)
1978 {
1979 struct rvu *rvu = filp->private;
1980 struct npc_mcam *mcam;
1981 int blkaddr;
1982
1983 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1984 if (blkaddr < 0)
1985 return -ENODEV;
1986
1987 mcam = &rvu->hw->mcam;
1988
1989 seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
1990 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
1991 rvu_read64(rvu, blkaddr,
1992 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
1993
1994 return 0;
1995 }
1996
1997 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
1998
/* Print the match fields of one MCAM rule, one
 * "<field> <value> mask <mask>" line per feature bit set in
 * rule->features.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			/* ntohs: multi-byte fields are kept in network order */
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			/* Field has no printable value here; end the line */
			seq_puts(s, "\n");
			break;
		}
	}
}
2062
/* Print a human-readable description of an MCAM rule's action,
 * decoding the TX or RX action opcode depending on rule->intf.
 * Unknown opcodes print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (rule->intf == NIX_INTF_TX) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
2110
/* Map an NPC interface id to a printable name; "unknown" for
 * anything outside the NIX0/NIX1 RX/TX set.
 */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2128
/* List every software-tracked MCAM rule: owner PF/VF, direction and
 * interface, match fields, forward target (RX only), action, enable
 * state and - when the rule has one - its hit counter value.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	/* Hold the MCAM lock so the rule list cannot change under us */
	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* Non-zero FUNC bits mean the owner is a VF of this PF */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
			   "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
		}

		rvu_dbg_npc_mcam_show_action(s, iter);
		seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		/* Hit count read live from the hardware match counter */
		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2191
rvu_dbg_npc_init(struct rvu * rvu)2192 static void rvu_dbg_npc_init(struct rvu *rvu)
2193 {
2194 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2195
2196 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2197 &rvu_dbg_npc_mcam_info_fops);
2198 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2199 &rvu_dbg_npc_mcam_rules_fops);
2200 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2201 &rvu_dbg_npc_rx_miss_act_fops);
2202 }
2203
/* Print the FREE/BUSY status bitmaps for one CPT engine type.
 *
 * CPT_AF_CONSTANTS1 packs the per-type engine counts (SE in bits 15:0,
 * IE in 31:16, AE in 47:32); engines are numbered SE first, then IE,
 * then AE, so [e_min, e_max) selects the requested type's range.
 *
 * Returns 0 on success, -EINVAL for an unknown engine type.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min = 0, e_max = 0, e, i;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	switch (eng_type) {
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	default:
		return -EINVAL;
	}

	for (e = e_min, i = 0; e < e_max; e++, i++) {
		/* The status words hold one bit per engine; shifting a
		 * 64-bit value by >= 64 is undefined behavior, so stop
		 * once both bitmaps are full.
		 */
		if (i >= 64)
			break;

		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1)
			busy_sts |= 1ULL << i;

		if (reg & 0x2)
			free_sts |= 1ULL << i;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}
2249
/* seq_file show callbacks for the per-engine-type status files; each
 * simply forwards to cpt_eng_sts_display() with the matching type.
 */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);

static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);

static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2270
rvu_dbg_cpt_engines_info_display(struct seq_file * filp,void * unused)2271 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2272 {
2273 struct cpt_ctx *ctx = filp->private;
2274 u16 max_ses, max_ies, max_aes;
2275 struct rvu *rvu = ctx->rvu;
2276 int blkaddr = ctx->blkaddr;
2277 u32 e_max, e;
2278 u64 reg;
2279
2280 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2281 max_ses = reg & 0xffff;
2282 max_ies = (reg >> 16) & 0xffff;
2283 max_aes = (reg >> 32) & 0xffff;
2284
2285 e_max = max_ses + max_ies + max_aes;
2286
2287 seq_puts(filp, "===========================================\n");
2288 for (e = 0; e < e_max; e++) {
2289 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2290 seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
2291 reg & 0xff);
2292 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2293 seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
2294 reg);
2295 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2296 seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
2297 reg);
2298 seq_puts(filp, "===========================================\n");
2299 }
2300 return 0;
2301 }
2302
2303 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2304
rvu_dbg_cpt_lfs_info_display(struct seq_file * filp,void * unused)2305 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
2306 {
2307 struct cpt_ctx *ctx = filp->private;
2308 int blkaddr = ctx->blkaddr;
2309 struct rvu *rvu = ctx->rvu;
2310 struct rvu_block *block;
2311 struct rvu_hwinfo *hw;
2312 u64 reg;
2313 u32 lf;
2314
2315 hw = rvu->hw;
2316 block = &hw->block[blkaddr];
2317 if (!block->lf.bmap)
2318 return -ENODEV;
2319
2320 seq_puts(filp, "===========================================\n");
2321 for (lf = 0; lf < block->lf.max; lf++) {
2322 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
2323 seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
2324 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
2325 seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
2326 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
2327 seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
2328 reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
2329 (lf << block->lfshift));
2330 seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
2331 seq_puts(filp, "===========================================\n");
2332 }
2333 return 0;
2334 }
2335
2336 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2337
/* Dump the CPT AF fault/error interrupt and error-info registers. */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2364
/* Dump the CPT AF performance counters (request/latency/cycle PCs). */
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	seq_printf(filp, "CPT instruction requests %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	seq_printf(filp, "CPT instruction latency %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	seq_printf(filp, "CPT NCB read requests %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	seq_printf(filp, "CPT NCB read latency %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
	seq_printf(filp, "CPT active cycles pc %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	seq_printf(filp, "CPT clock count pc %llu\n", reg);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2391
rvu_dbg_cpt_init(struct rvu * rvu,int blkaddr)2392 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2393 {
2394 struct cpt_ctx *ctx;
2395
2396 if (!is_block_implemented(rvu->hw, blkaddr))
2397 return;
2398
2399 if (blkaddr == BLKADDR_CPT0) {
2400 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2401 ctx = &rvu->rvu_dbg.cpt_ctx[0];
2402 ctx->blkaddr = BLKADDR_CPT0;
2403 ctx->rvu = rvu;
2404 } else {
2405 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2406 rvu->rvu_dbg.root);
2407 ctx = &rvu->rvu_dbg.cpt_ctx[1];
2408 ctx->blkaddr = BLKADDR_CPT1;
2409 ctx->rvu = rvu;
2410 }
2411
2412 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2413 &rvu_dbg_cpt_pc_fops);
2414 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2415 &rvu_dbg_cpt_ae_sts_fops);
2416 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2417 &rvu_dbg_cpt_se_sts_fops);
2418 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2419 &rvu_dbg_cpt_ie_sts_fops);
2420 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2421 &rvu_dbg_cpt_engines_info_fops);
2422 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2423 &rvu_dbg_cpt_lfs_info_fops);
2424 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2425 &rvu_dbg_cpt_err_info_fops);
2426 }
2427
/* Root debugfs directory name: "octeontx2" on OcteonTx2 silicon,
 * "cn10k" otherwise.
 */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
2435
rvu_dbg_init(struct rvu * rvu)2436 void rvu_dbg_init(struct rvu *rvu)
2437 {
2438 rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
2439
2440 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2441 &rvu_dbg_rsrc_status_fops);
2442
2443 if (!cgx_get_cgxcnt_max())
2444 goto create;
2445
2446 if (is_rvu_otx2(rvu))
2447 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
2448 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2449 else
2450 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
2451 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2452
2453 create:
2454 rvu_dbg_npa_init(rvu);
2455 rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2456
2457 rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2458 rvu_dbg_cgx_init(rvu);
2459 rvu_dbg_npc_init(rvu);
2460 rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
2461 rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
2462 }
2463
/* Tear down the entire debugfs tree created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
2468
2469 #endif /* CONFIG_DEBUG_FS */
2470