1 // SPDX-License-Identifier: GPL-2.0-only
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015 QLogic Corporation
4  */
5 
6 #include <linux/module.h>
7 #include <linux/vmalloc.h>
8 #include <linux/crc32.h>
9 #include "qed.h"
10 #include "qed_hsi.h"
11 #include "qed_hw.h"
12 #include "qed_mcp.h"
13 #include "qed_reg_addr.h"
14 
15 /* Memory groups enum */
16 enum mem_groups {
17 	MEM_GROUP_PXP_MEM,
18 	MEM_GROUP_DMAE_MEM,
19 	MEM_GROUP_CM_MEM,
20 	MEM_GROUP_QM_MEM,
21 	MEM_GROUP_DORQ_MEM,
22 	MEM_GROUP_BRB_RAM,
23 	MEM_GROUP_BRB_MEM,
24 	MEM_GROUP_PRS_MEM,
25 	MEM_GROUP_IOR,
26 	MEM_GROUP_BTB_RAM,
27 	MEM_GROUP_CONN_CFC_MEM,
28 	MEM_GROUP_TASK_CFC_MEM,
29 	MEM_GROUP_CAU_PI,
30 	MEM_GROUP_CAU_MEM,
31 	MEM_GROUP_PXP_ILT,
32 	MEM_GROUP_TM_MEM,
33 	MEM_GROUP_SDM_MEM,
34 	MEM_GROUP_PBUF,
35 	MEM_GROUP_RAM,
36 	MEM_GROUP_MULD_MEM,
37 	MEM_GROUP_BTB_MEM,
38 	MEM_GROUP_RDIF_CTX,
39 	MEM_GROUP_TDIF_CTX,
40 	MEM_GROUP_CFC_MEM,
41 	MEM_GROUP_IGU_MEM,
42 	MEM_GROUP_IGU_MSIX,
43 	MEM_GROUP_CAU_SB,
44 	MEM_GROUP_BMB_RAM,
45 	MEM_GROUP_BMB_MEM,
46 	MEM_GROUPS_NUM
47 };
48 
49 /* Memory groups names */
50 static const char * const s_mem_group_names[] = {
51 	"PXP_MEM",
52 	"DMAE_MEM",
53 	"CM_MEM",
54 	"QM_MEM",
55 	"DORQ_MEM",
56 	"BRB_RAM",
57 	"BRB_MEM",
58 	"PRS_MEM",
59 	"IOR",
60 	"BTB_RAM",
61 	"CONN_CFC_MEM",
62 	"TASK_CFC_MEM",
63 	"CAU_PI",
64 	"CAU_MEM",
65 	"PXP_ILT",
66 	"TM_MEM",
67 	"SDM_MEM",
68 	"PBUF",
69 	"RAM",
70 	"MULD_MEM",
71 	"BTB_MEM",
72 	"RDIF_CTX",
73 	"TDIF_CTX",
74 	"CFC_MEM",
75 	"IGU_MEM",
76 	"IGU_MSIX",
77 	"CAU_SB",
78 	"BMB_RAM",
79 	"BMB_MEM",
80 };
81 
82 /* Idle check conditions */
83 
84 static u32 cond5(const u32 *r, const u32 *imm)
85 {
86 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
87 }
88 
89 static u32 cond7(const u32 *r, const u32 *imm)
90 {
91 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
92 }
93 
94 static u32 cond6(const u32 *r, const u32 *imm)
95 {
96 	return (r[0] & imm[0]) != imm[1];
97 }
98 
99 static u32 cond9(const u32 *r, const u32 *imm)
100 {
101 	return ((r[0] & imm[0]) >> imm[1]) !=
102 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
103 }
104 
105 static u32 cond10(const u32 *r, const u32 *imm)
106 {
107 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
108 }
109 
110 static u32 cond4(const u32 *r, const u32 *imm)
111 {
112 	return (r[0] & ~imm[0]) != imm[1];
113 }
114 
115 static u32 cond0(const u32 *r, const u32 *imm)
116 {
117 	return (r[0] & ~r[1]) != imm[0];
118 }
119 
120 static u32 cond1(const u32 *r, const u32 *imm)
121 {
122 	return r[0] != imm[0];
123 }
124 
125 static u32 cond11(const u32 *r, const u32 *imm)
126 {
127 	return r[0] != r[1] && r[2] == imm[0];
128 }
129 
130 static u32 cond12(const u32 *r, const u32 *imm)
131 {
132 	return r[0] != r[1] && r[2] > imm[0];
133 }
134 
135 static u32 cond3(const u32 *r, const u32 *imm)
136 {
137 	return r[0] != r[1];
138 }
139 
140 static u32 cond13(const u32 *r, const u32 *imm)
141 {
142 	return r[0] & imm[0];
143 }
144 
145 static u32 cond8(const u32 *r, const u32 *imm)
146 {
147 	return r[0] < (r[1] - imm[0]);
148 }
149 
150 static u32 cond2(const u32 *r, const u32 *imm)
151 {
152 	return r[0] > imm[0];
153 }
154 
155 /* Array of Idle Check conditions */
156 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
157 	cond0,
158 	cond1,
159 	cond2,
160 	cond3,
161 	cond4,
162 	cond5,
163 	cond6,
164 	cond7,
165 	cond8,
166 	cond9,
167 	cond10,
168 	cond11,
169 	cond12,
170 	cond13,
171 };
172 
173 /******************************* Data Types **********************************/
174 
175 enum platform_ids {
176 	PLATFORM_ASIC,
177 	PLATFORM_RESERVED,
178 	PLATFORM_RESERVED2,
179 	PLATFORM_RESERVED3,
180 	MAX_PLATFORM_IDS
181 };
182 
183 /* Chip constant definitions */
184 struct chip_defs {
185 	const char *name;
186 };
187 
188 /* Platform constant definitions */
189 struct platform_defs {
190 	const char *name;
191 	u32 delay_factor;
192 	u32 dmae_thresh;
193 	u32 log_thresh;
194 };
195 
196 /* Storm constant definitions.
197  * Addresses are in bytes, sizes are in quad-regs.
198  */
199 struct storm_defs {
200 	char letter;
201 	enum block_id block_id;
202 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
203 	bool has_vfc;
204 	u32 sem_fast_mem_addr;
205 	u32 sem_frame_mode_addr;
206 	u32 sem_slow_enable_addr;
207 	u32 sem_slow_mode_addr;
208 	u32 sem_slow_mode1_conf_addr;
209 	u32 sem_sync_dbg_empty_addr;
210 	u32 sem_slow_dbg_empty_addr;
211 	u32 cm_ctx_wr_addr;
212 	u32 cm_conn_ag_ctx_lid_size;
213 	u32 cm_conn_ag_ctx_rd_addr;
214 	u32 cm_conn_st_ctx_lid_size;
215 	u32 cm_conn_st_ctx_rd_addr;
216 	u32 cm_task_ag_ctx_lid_size;
217 	u32 cm_task_ag_ctx_rd_addr;
218 	u32 cm_task_st_ctx_lid_size;
219 	u32 cm_task_st_ctx_rd_addr;
220 };
221 
222 /* Block constant definitions */
223 struct block_defs {
224 	const char *name;
225 	bool exists[MAX_CHIP_IDS];
226 	bool associated_to_storm;
227 
228 	/* Valid only if associated_to_storm is true */
229 	u32 storm_id;
230 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
231 	u32 dbg_select_addr;
232 	u32 dbg_enable_addr;
233 	u32 dbg_shift_addr;
234 	u32 dbg_force_valid_addr;
235 	u32 dbg_force_frame_addr;
236 	bool has_reset_bit;
237 
238 	/* If true, block is taken out of reset before dump */
239 	bool unreset;
240 	enum dbg_reset_regs reset_reg;
241 
242 	/* Bit offset in reset register */
243 	u8 reset_bit_offset;
244 };
245 
246 /* Reset register definitions */
247 struct reset_reg_defs {
248 	u32 addr;
249 	bool exists[MAX_CHIP_IDS];
250 	u32 unreset_val[MAX_CHIP_IDS];
251 };
252 
253 struct grc_param_defs {
254 	u32 default_val[MAX_CHIP_IDS];
255 	u32 min;
256 	u32 max;
257 	bool is_preset;
258 	bool is_persistent;
259 	u32 exclude_all_preset_val;
260 	u32 crash_preset_val;
261 };
262 
263 /* Address is in 128b units. Width is in bits. */
264 struct rss_mem_defs {
265 	const char *mem_name;
266 	const char *type_name;
267 	u32 addr;
268 	u32 entry_width;
269 	u32 num_entries[MAX_CHIP_IDS];
270 };
271 
272 struct vfc_ram_defs {
273 	const char *mem_name;
274 	const char *type_name;
275 	u32 base_row;
276 	u32 num_rows;
277 };
278 
279 struct big_ram_defs {
280 	const char *instance_name;
281 	enum mem_groups mem_group_id;
282 	enum mem_groups ram_mem_group_id;
283 	enum dbg_grc_params grc_param;
284 	u32 addr_reg_addr;
285 	u32 data_reg_addr;
286 	u32 is_256b_reg_addr;
287 	u32 is_256b_bit_offset[MAX_CHIP_IDS];
288 	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
289 };
290 
291 struct phy_defs {
292 	const char *phy_name;
293 
294 	/* PHY base GRC address */
295 	u32 base_addr;
296 
297 	/* Relative address of indirect TBUS address register (bits 0..7) */
298 	u32 tbus_addr_lo_addr;
299 
300 	/* Relative address of indirect TBUS address register (bits 8..10) */
301 	u32 tbus_addr_hi_addr;
302 
303 	/* Relative address of indirect TBUS data register (bits 0..7) */
304 	u32 tbus_data_lo_addr;
305 
306 	/* Relative address of indirect TBUS data register (bits 8..11) */
307 	u32 tbus_data_hi_addr;
308 };
309 
310 /* Split type definitions */
311 struct split_type_defs {
312 	const char *name;
313 };
314 
315 /******************************** Constants **********************************/
316 
317 #define MAX_LCIDS			320
318 #define MAX_LTIDS			320
319 
320 #define NUM_IOR_SETS			2
321 #define IORS_PER_SET			176
322 #define IOR_SET_OFFSET(set_id)		((set_id) * 256)
323 
324 #define BYTES_IN_DWORD			sizeof(u32)
325 
326 /* In the macros below, size and offset are specified in bits */
327 #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
328 #define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
329 #define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
330 #define FIELD_DWORD_OFFSET(type, field) \
331 	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
332 #define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
333 #define FIELD_BIT_MASK(type, field) \
334 	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
335 	 FIELD_DWORD_SHIFT(type, field))
336 
337 #define SET_VAR_FIELD(var, type, field, val) \
338 	do { \
339 		var[FIELD_DWORD_OFFSET(type, field)] &=	\
340 		(~FIELD_BIT_MASK(type, field));	\
341 		var[FIELD_DWORD_OFFSET(type, field)] |= \
342 		(val) << FIELD_DWORD_SHIFT(type, field); \
343 	} while (0)
344 
345 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
346 	do { \
347 		for (i = 0; i < (arr_size); i++) \
348 			qed_wr(dev, ptt, addr,	(arr)[i]); \
349 	} while (0)
350 
351 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
352 	do { \
353 		for (i = 0; i < (arr_size); i++) \
354 			(arr)[i] = qed_rd(dev, ptt, addr); \
355 	} while (0)
356 
357 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
358 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
359 
360 /* Extra lines include a signature line + optional latency events line */
361 #define NUM_EXTRA_DBG_LINES(block_desc) \
362 	(1 + ((block_desc)->has_latency_events ? 1 : 0))
363 #define NUM_DBG_LINES(block_desc) \
364 	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
365 
366 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
367 #define RAM_LINES_TO_BYTES(lines) \
368 	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
369 
370 #define REG_DUMP_LEN_SHIFT		24
371 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
372 	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
373 
374 #define IDLE_CHK_RULE_SIZE_DWORDS \
375 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
376 
377 #define IDLE_CHK_RESULT_HDR_DWORDS \
378 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
379 
380 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
381 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
382 
383 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
384 
385 /* The sizes and offsets below are specified in bits */
386 #define VFC_CAM_CMD_STRUCT_SIZE		64
387 #define VFC_CAM_CMD_ROW_OFFSET		48
388 #define VFC_CAM_CMD_ROW_SIZE		9
389 #define VFC_CAM_ADDR_STRUCT_SIZE	16
390 #define VFC_CAM_ADDR_OP_OFFSET		0
391 #define VFC_CAM_ADDR_OP_SIZE		4
392 #define VFC_CAM_RESP_STRUCT_SIZE	256
393 #define VFC_RAM_ADDR_STRUCT_SIZE	16
394 #define VFC_RAM_ADDR_OP_OFFSET		0
395 #define VFC_RAM_ADDR_OP_SIZE		2
396 #define VFC_RAM_ADDR_ROW_OFFSET		2
397 #define VFC_RAM_ADDR_ROW_SIZE		10
398 #define VFC_RAM_RESP_STRUCT_SIZE	256
399 
400 #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
401 #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
402 #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
403 #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
404 #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
405 #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
406 
407 #define NUM_VFC_RAM_TYPES		4
408 
409 #define VFC_CAM_NUM_ROWS		512
410 
411 #define VFC_OPCODE_CAM_RD		14
412 #define VFC_OPCODE_RAM_RD		0
413 
414 #define NUM_RSS_MEM_TYPES		5
415 
416 #define NUM_BIG_RAM_TYPES		3
417 #define BIG_RAM_NAME_LEN		3
418 
419 #define NUM_PHY_TBUS_ADDRESSES		2048
420 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
421 
422 #define RESET_REG_UNRESET_OFFSET	4
423 
424 #define STALL_DELAY_MS			500
425 
426 #define STATIC_DEBUG_LINE_DWORDS	9
427 
428 #define NUM_COMMON_GLOBAL_PARAMS	8
429 
430 #define FW_IMG_MAIN			1
431 
432 #define REG_FIFO_ELEMENT_DWORDS		2
433 #define REG_FIFO_DEPTH_ELEMENTS		32
434 #define REG_FIFO_DEPTH_DWORDS \
435 	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
436 
437 #define IGU_FIFO_ELEMENT_DWORDS		4
438 #define IGU_FIFO_DEPTH_ELEMENTS		64
439 #define IGU_FIFO_DEPTH_DWORDS \
440 	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
441 
442 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
443 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
444 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
445 	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
446 	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
447 
448 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
449 	(MCP_REG_SCRATCH + \
450 	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
451 
452 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
453 #define EMPTY_FW_IMAGE_STR		"???????????????"
454 
455 /***************************** Constant Arrays *******************************/
456 
457 struct dbg_array {
458 	const u32 *ptr;
459 	u32 size_in_dwords;
460 };
461 
462 /* Debug arrays */
463 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
464 
465 /* Chip constant definitions array */
466 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
467 	{"bb"},
468 	{"ah"},
469 	{"reserved"},
470 };
471 
472 /* Storm constant definitions array */
473 static struct storm_defs s_storm_defs[] = {
474 	/* Tstorm */
475 	{'T', BLOCK_TSEM,
476 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
477 	  DBG_BUS_CLIENT_RBCT}, true,
478 	 TSEM_REG_FAST_MEMORY,
479 	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
480 	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
481 	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
482 	 TCM_REG_CTX_RBC_ACCS,
483 	 4, TCM_REG_AGG_CON_CTX,
484 	 16, TCM_REG_SM_CON_CTX,
485 	 2, TCM_REG_AGG_TASK_CTX,
486 	 4, TCM_REG_SM_TASK_CTX},
487 
488 	/* Mstorm */
489 	{'M', BLOCK_MSEM,
490 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
491 	  DBG_BUS_CLIENT_RBCM}, false,
492 	 MSEM_REG_FAST_MEMORY,
493 	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
494 	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
495 	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
496 	 MCM_REG_CTX_RBC_ACCS,
497 	 1, MCM_REG_AGG_CON_CTX,
498 	 10, MCM_REG_SM_CON_CTX,
499 	 2, MCM_REG_AGG_TASK_CTX,
500 	 7, MCM_REG_SM_TASK_CTX},
501 
502 	/* Ustorm */
503 	{'U', BLOCK_USEM,
504 	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
505 	  DBG_BUS_CLIENT_RBCU}, false,
506 	 USEM_REG_FAST_MEMORY,
507 	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
508 	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
509 	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
510 	 UCM_REG_CTX_RBC_ACCS,
511 	 2, UCM_REG_AGG_CON_CTX,
512 	 13, UCM_REG_SM_CON_CTX,
513 	 3, UCM_REG_AGG_TASK_CTX,
514 	 3, UCM_REG_SM_TASK_CTX},
515 
516 	/* Xstorm */
517 	{'X', BLOCK_XSEM,
518 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
519 	  DBG_BUS_CLIENT_RBCX}, false,
520 	 XSEM_REG_FAST_MEMORY,
521 	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
522 	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
523 	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
524 	 XCM_REG_CTX_RBC_ACCS,
525 	 9, XCM_REG_AGG_CON_CTX,
526 	 15, XCM_REG_SM_CON_CTX,
527 	 0, 0,
528 	 0, 0},
529 
530 	/* Ystorm */
531 	{'Y', BLOCK_YSEM,
532 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
533 	  DBG_BUS_CLIENT_RBCY}, false,
534 	 YSEM_REG_FAST_MEMORY,
535 	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
536 	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
537 	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
538 	 YCM_REG_CTX_RBC_ACCS,
539 	 2, YCM_REG_AGG_CON_CTX,
540 	 3, YCM_REG_SM_CON_CTX,
541 	 2, YCM_REG_AGG_TASK_CTX,
542 	 12, YCM_REG_SM_TASK_CTX},
543 
544 	/* Pstorm */
545 	{'P', BLOCK_PSEM,
546 	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
547 	  DBG_BUS_CLIENT_RBCS}, true,
548 	 PSEM_REG_FAST_MEMORY,
549 	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
550 	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
551 	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
552 	 PCM_REG_CTX_RBC_ACCS,
553 	 0, 0,
554 	 10, PCM_REG_SM_CON_CTX,
555 	 0, 0,
556 	 0, 0}
557 };
558 
559 /* Block definitions array */
560 
561 static struct block_defs block_grc_defs = {
562 	"grc",
563 	{true, true, true}, false, 0,
564 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
565 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
566 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
567 	GRC_REG_DBG_FORCE_FRAME,
568 	true, false, DBG_RESET_REG_MISC_PL_UA, 1
569 };
570 
571 static struct block_defs block_miscs_defs = {
572 	"miscs", {true, true, true}, false, 0,
573 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
574 	0, 0, 0, 0, 0,
575 	false, false, MAX_DBG_RESET_REGS, 0
576 };
577 
578 static struct block_defs block_misc_defs = {
579 	"misc", {true, true, true}, false, 0,
580 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
581 	0, 0, 0, 0, 0,
582 	false, false, MAX_DBG_RESET_REGS, 0
583 };
584 
585 static struct block_defs block_dbu_defs = {
586 	"dbu", {true, true, true}, false, 0,
587 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
588 	0, 0, 0, 0, 0,
589 	false, false, MAX_DBG_RESET_REGS, 0
590 };
591 
592 static struct block_defs block_pglue_b_defs = {
593 	"pglue_b",
594 	{true, true, true}, false, 0,
595 	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
596 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
597 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
598 	PGLUE_B_REG_DBG_FORCE_FRAME,
599 	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
600 };
601 
602 static struct block_defs block_cnig_defs = {
603 	"cnig",
604 	{true, true, true}, false, 0,
605 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
606 	 DBG_BUS_CLIENT_RBCW},
607 	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
608 	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
609 	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
610 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
611 };
612 
613 static struct block_defs block_cpmu_defs = {
614 	"cpmu", {true, true, true}, false, 0,
615 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
616 	0, 0, 0, 0, 0,
617 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
618 };
619 
620 static struct block_defs block_ncsi_defs = {
621 	"ncsi",
622 	{true, true, true}, false, 0,
623 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
624 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
625 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
626 	NCSI_REG_DBG_FORCE_FRAME,
627 	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
628 };
629 
630 static struct block_defs block_opte_defs = {
631 	"opte", {true, true, false}, false, 0,
632 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
633 	0, 0, 0, 0, 0,
634 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
635 };
636 
637 static struct block_defs block_bmb_defs = {
638 	"bmb",
639 	{true, true, true}, false, 0,
640 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
641 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
642 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
643 	BMB_REG_DBG_FORCE_FRAME,
644 	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
645 };
646 
647 static struct block_defs block_pcie_defs = {
648 	"pcie",
649 	{true, true, true}, false, 0,
650 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
651 	 DBG_BUS_CLIENT_RBCH},
652 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
653 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
654 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
655 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
656 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
657 	false, false, MAX_DBG_RESET_REGS, 0
658 };
659 
660 static struct block_defs block_mcp_defs = {
661 	"mcp", {true, true, true}, false, 0,
662 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
663 	0, 0, 0, 0, 0,
664 	false, false, MAX_DBG_RESET_REGS, 0
665 };
666 
667 static struct block_defs block_mcp2_defs = {
668 	"mcp2",
669 	{true, true, true}, false, 0,
670 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
671 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
672 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
673 	MCP2_REG_DBG_FORCE_FRAME,
674 	false, false, MAX_DBG_RESET_REGS, 0
675 };
676 
677 static struct block_defs block_pswhst_defs = {
678 	"pswhst",
679 	{true, true, true}, false, 0,
680 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
681 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
682 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
683 	PSWHST_REG_DBG_FORCE_FRAME,
684 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
685 };
686 
687 static struct block_defs block_pswhst2_defs = {
688 	"pswhst2",
689 	{true, true, true}, false, 0,
690 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
691 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
692 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
693 	PSWHST2_REG_DBG_FORCE_FRAME,
694 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
695 };
696 
697 static struct block_defs block_pswrd_defs = {
698 	"pswrd",
699 	{true, true, true}, false, 0,
700 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
701 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
702 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
703 	PSWRD_REG_DBG_FORCE_FRAME,
704 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
705 };
706 
707 static struct block_defs block_pswrd2_defs = {
708 	"pswrd2",
709 	{true, true, true}, false, 0,
710 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
711 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
712 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
713 	PSWRD2_REG_DBG_FORCE_FRAME,
714 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
715 };
716 
717 static struct block_defs block_pswwr_defs = {
718 	"pswwr",
719 	{true, true, true}, false, 0,
720 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
721 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
722 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
723 	PSWWR_REG_DBG_FORCE_FRAME,
724 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
725 };
726 
727 static struct block_defs block_pswwr2_defs = {
728 	"pswwr2", {true, true, true}, false, 0,
729 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
730 	0, 0, 0, 0, 0,
731 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
732 };
733 
734 static struct block_defs block_pswrq_defs = {
735 	"pswrq",
736 	{true, true, true}, false, 0,
737 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
738 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
739 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
740 	PSWRQ_REG_DBG_FORCE_FRAME,
741 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
742 };
743 
744 static struct block_defs block_pswrq2_defs = {
745 	"pswrq2",
746 	{true, true, true}, false, 0,
747 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
748 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
749 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
750 	PSWRQ2_REG_DBG_FORCE_FRAME,
751 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
752 };
753 
754 static struct block_defs block_pglcs_defs = {
755 	"pglcs",
756 	{true, true, true}, false, 0,
757 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
758 	 DBG_BUS_CLIENT_RBCH},
759 	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
760 	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
761 	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
762 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
763 };
764 
765 static struct block_defs block_ptu_defs = {
766 	"ptu",
767 	{true, true, true}, false, 0,
768 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
769 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
770 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
771 	PTU_REG_DBG_FORCE_FRAME,
772 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
773 };
774 
775 static struct block_defs block_dmae_defs = {
776 	"dmae",
777 	{true, true, true}, false, 0,
778 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
779 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
780 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
781 	DMAE_REG_DBG_FORCE_FRAME,
782 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
783 };
784 
785 static struct block_defs block_tcm_defs = {
786 	"tcm",
787 	{true, true, true}, true, DBG_TSTORM_ID,
788 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
789 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
790 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
791 	TCM_REG_DBG_FORCE_FRAME,
792 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
793 };
794 
795 static struct block_defs block_mcm_defs = {
796 	"mcm",
797 	{true, true, true}, true, DBG_MSTORM_ID,
798 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
799 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
800 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
801 	MCM_REG_DBG_FORCE_FRAME,
802 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
803 };
804 
805 static struct block_defs block_ucm_defs = {
806 	"ucm",
807 	{true, true, true}, true, DBG_USTORM_ID,
808 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
809 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
810 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
811 	UCM_REG_DBG_FORCE_FRAME,
812 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
813 };
814 
815 static struct block_defs block_xcm_defs = {
816 	"xcm",
817 	{true, true, true}, true, DBG_XSTORM_ID,
818 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
819 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
820 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
821 	XCM_REG_DBG_FORCE_FRAME,
822 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
823 };
824 
825 static struct block_defs block_ycm_defs = {
826 	"ycm",
827 	{true, true, true}, true, DBG_YSTORM_ID,
828 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
829 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
830 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
831 	YCM_REG_DBG_FORCE_FRAME,
832 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
833 };
834 
835 static struct block_defs block_pcm_defs = {
836 	"pcm",
837 	{true, true, true}, true, DBG_PSTORM_ID,
838 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
839 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
840 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
841 	PCM_REG_DBG_FORCE_FRAME,
842 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
843 };
844 
845 static struct block_defs block_qm_defs = {
846 	"qm",
847 	{true, true, true}, false, 0,
848 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
849 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
850 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
851 	QM_REG_DBG_FORCE_FRAME,
852 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
853 };
854 
855 static struct block_defs block_tm_defs = {
856 	"tm",
857 	{true, true, true}, false, 0,
858 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
859 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
860 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
861 	TM_REG_DBG_FORCE_FRAME,
862 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
863 };
864 
865 static struct block_defs block_dorq_defs = {
866 	"dorq",
867 	{true, true, true}, false, 0,
868 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
869 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
870 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
871 	DORQ_REG_DBG_FORCE_FRAME,
872 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
873 };
874 
875 static struct block_defs block_brb_defs = {
876 	"brb",
877 	{true, true, true}, false, 0,
878 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
879 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
880 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
881 	BRB_REG_DBG_FORCE_FRAME,
882 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
883 };
884 
885 static struct block_defs block_src_defs = {
886 	"src",
887 	{true, true, true}, false, 0,
888 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
889 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
890 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
891 	SRC_REG_DBG_FORCE_FRAME,
892 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
893 };
894 
895 static struct block_defs block_prs_defs = {
896 	"prs",
897 	{true, true, true}, false, 0,
898 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
899 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
900 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
901 	PRS_REG_DBG_FORCE_FRAME,
902 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
903 };
904 
905 static struct block_defs block_tsdm_defs = {
906 	"tsdm",
907 	{true, true, true}, true, DBG_TSTORM_ID,
908 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
909 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
910 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
911 	TSDM_REG_DBG_FORCE_FRAME,
912 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
913 };
914 
915 static struct block_defs block_msdm_defs = {
916 	"msdm",
917 	{true, true, true}, true, DBG_MSTORM_ID,
918 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
919 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
920 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
921 	MSDM_REG_DBG_FORCE_FRAME,
922 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
923 };
924 
925 static struct block_defs block_usdm_defs = {
926 	"usdm",
927 	{true, true, true}, true, DBG_USTORM_ID,
928 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
929 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
930 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
931 	USDM_REG_DBG_FORCE_FRAME,
932 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
933 };
934 
935 static struct block_defs block_xsdm_defs = {
936 	"xsdm",
937 	{true, true, true}, true, DBG_XSTORM_ID,
938 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
939 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
940 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
941 	XSDM_REG_DBG_FORCE_FRAME,
942 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
943 };
944 
945 static struct block_defs block_ysdm_defs = {
946 	"ysdm",
947 	{true, true, true}, true, DBG_YSTORM_ID,
948 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
949 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
950 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
951 	YSDM_REG_DBG_FORCE_FRAME,
952 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
953 };
954 
955 static struct block_defs block_psdm_defs = {
956 	"psdm",
957 	{true, true, true}, true, DBG_PSTORM_ID,
958 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
959 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
960 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
961 	PSDM_REG_DBG_FORCE_FRAME,
962 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
963 };
964 
965 static struct block_defs block_tsem_defs = {
966 	"tsem",
967 	{true, true, true}, true, DBG_TSTORM_ID,
968 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
969 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
970 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
971 	TSEM_REG_DBG_FORCE_FRAME,
972 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
973 };
974 
975 static struct block_defs block_msem_defs = {
976 	"msem",
977 	{true, true, true}, true, DBG_MSTORM_ID,
978 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
979 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
980 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
981 	MSEM_REG_DBG_FORCE_FRAME,
982 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
983 };
984 
985 static struct block_defs block_usem_defs = {
986 	"usem",
987 	{true, true, true}, true, DBG_USTORM_ID,
988 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
989 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
990 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
991 	USEM_REG_DBG_FORCE_FRAME,
992 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
993 };
994 
995 static struct block_defs block_xsem_defs = {
996 	"xsem",
997 	{true, true, true}, true, DBG_XSTORM_ID,
998 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
999 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1000 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1001 	XSEM_REG_DBG_FORCE_FRAME,
1002 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1003 };
1004 
1005 static struct block_defs block_ysem_defs = {
1006 	"ysem",
1007 	{true, true, true}, true, DBG_YSTORM_ID,
1008 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
1009 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1010 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1011 	YSEM_REG_DBG_FORCE_FRAME,
1012 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1013 };
1014 
1015 static struct block_defs block_psem_defs = {
1016 	"psem",
1017 	{true, true, true}, true, DBG_PSTORM_ID,
1018 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1019 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1020 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1021 	PSEM_REG_DBG_FORCE_FRAME,
1022 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1023 };
1024 
1025 static struct block_defs block_rss_defs = {
1026 	"rss",
1027 	{true, true, true}, false, 0,
1028 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1029 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1030 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1031 	RSS_REG_DBG_FORCE_FRAME,
1032 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1033 };
1034 
1035 static struct block_defs block_tmld_defs = {
1036 	"tmld",
1037 	{true, true, true}, false, 0,
1038 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1039 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1040 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1041 	TMLD_REG_DBG_FORCE_FRAME,
1042 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1043 };
1044 
1045 static struct block_defs block_muld_defs = {
1046 	"muld",
1047 	{true, true, true}, false, 0,
1048 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1049 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1050 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1051 	MULD_REG_DBG_FORCE_FRAME,
1052 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1053 };
1054 
1055 static struct block_defs block_yuld_defs = {
1056 	"yuld",
1057 	{true, true, false}, false, 0,
1058 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
1059 	 MAX_DBG_BUS_CLIENTS},
1060 	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1061 	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1062 	YULD_REG_DBG_FORCE_FRAME_BB_K2,
1063 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1064 	15
1065 };
1066 
1067 static struct block_defs block_xyld_defs = {
1068 	"xyld",
1069 	{true, true, true}, false, 0,
1070 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1071 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1072 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1073 	XYLD_REG_DBG_FORCE_FRAME,
1074 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1075 };
1076 
1077 static struct block_defs block_ptld_defs = {
1078 	"ptld",
1079 	{false, false, true}, false, 0,
1080 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
1081 	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1082 	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1083 	PTLD_REG_DBG_FORCE_FRAME_E5,
1084 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1085 	28
1086 };
1087 
1088 static struct block_defs block_ypld_defs = {
1089 	"ypld",
1090 	{false, false, true}, false, 0,
1091 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
1092 	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1093 	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1094 	YPLD_REG_DBG_FORCE_FRAME_E5,
1095 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1096 	27
1097 };
1098 
1099 static struct block_defs block_prm_defs = {
1100 	"prm",
1101 	{true, true, true}, false, 0,
1102 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1103 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1104 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1105 	PRM_REG_DBG_FORCE_FRAME,
1106 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1107 };
1108 
1109 static struct block_defs block_pbf_pb1_defs = {
1110 	"pbf_pb1",
1111 	{true, true, true}, false, 0,
1112 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1113 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1114 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1115 	PBF_PB1_REG_DBG_FORCE_FRAME,
1116 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1117 	11
1118 };
1119 
1120 static struct block_defs block_pbf_pb2_defs = {
1121 	"pbf_pb2",
1122 	{true, true, true}, false, 0,
1123 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1124 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1125 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1126 	PBF_PB2_REG_DBG_FORCE_FRAME,
1127 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1128 	12
1129 };
1130 
1131 static struct block_defs block_rpb_defs = {
1132 	"rpb",
1133 	{true, true, true}, false, 0,
1134 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1135 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1136 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1137 	RPB_REG_DBG_FORCE_FRAME,
1138 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1139 };
1140 
1141 static struct block_defs block_btb_defs = {
1142 	"btb",
1143 	{true, true, true}, false, 0,
1144 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1145 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1146 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1147 	BTB_REG_DBG_FORCE_FRAME,
1148 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1149 };
1150 
1151 static struct block_defs block_pbf_defs = {
1152 	"pbf",
1153 	{true, true, true}, false, 0,
1154 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1155 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1156 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1157 	PBF_REG_DBG_FORCE_FRAME,
1158 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1159 };
1160 
1161 static struct block_defs block_rdif_defs = {
1162 	"rdif",
1163 	{true, true, true}, false, 0,
1164 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1165 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1166 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1167 	RDIF_REG_DBG_FORCE_FRAME,
1168 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1169 };
1170 
1171 static struct block_defs block_tdif_defs = {
1172 	"tdif",
1173 	{true, true, true}, false, 0,
1174 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1175 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1176 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1177 	TDIF_REG_DBG_FORCE_FRAME,
1178 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1179 };
1180 
1181 static struct block_defs block_cdu_defs = {
1182 	"cdu",
1183 	{true, true, true}, false, 0,
1184 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1185 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1186 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1187 	CDU_REG_DBG_FORCE_FRAME,
1188 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1189 };
1190 
1191 static struct block_defs block_ccfc_defs = {
1192 	"ccfc",
1193 	{true, true, true}, false, 0,
1194 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1195 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1196 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1197 	CCFC_REG_DBG_FORCE_FRAME,
1198 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1199 };
1200 
1201 static struct block_defs block_tcfc_defs = {
1202 	"tcfc",
1203 	{true, true, true}, false, 0,
1204 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1205 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1206 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1207 	TCFC_REG_DBG_FORCE_FRAME,
1208 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1209 };
1210 
1211 static struct block_defs block_igu_defs = {
1212 	"igu",
1213 	{true, true, true}, false, 0,
1214 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1215 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1216 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1217 	IGU_REG_DBG_FORCE_FRAME,
1218 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1219 };
1220 
1221 static struct block_defs block_cau_defs = {
1222 	"cau",
1223 	{true, true, true}, false, 0,
1224 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1225 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1226 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1227 	CAU_REG_DBG_FORCE_FRAME,
1228 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1229 };
1230 
1231 static struct block_defs block_rgfs_defs = {
1232 	"rgfs", {false, false, true}, false, 0,
1233 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1234 	0, 0, 0, 0, 0,
1235 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
1236 };
1237 
1238 static struct block_defs block_rgsrc_defs = {
1239 	"rgsrc",
1240 	{false, false, true}, false, 0,
1241 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1242 	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1243 	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1244 	RGSRC_REG_DBG_FORCE_FRAME_E5,
1245 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1246 	30
1247 };
1248 
1249 static struct block_defs block_tgfs_defs = {
1250 	"tgfs", {false, false, true}, false, 0,
1251 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1252 	0, 0, 0, 0, 0,
1253 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
1254 };
1255 
1256 static struct block_defs block_tgsrc_defs = {
1257 	"tgsrc",
1258 	{false, false, true}, false, 0,
1259 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
1260 	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1261 	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1262 	TGSRC_REG_DBG_FORCE_FRAME_E5,
1263 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1264 	31
1265 };
1266 
1267 static struct block_defs block_umac_defs = {
1268 	"umac",
1269 	{true, true, true}, false, 0,
1270 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
1271 	 DBG_BUS_CLIENT_RBCZ},
1272 	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1273 	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1274 	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1275 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1276 };
1277 
1278 static struct block_defs block_xmac_defs = {
1279 	"xmac", {true, false, false}, false, 0,
1280 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1281 	0, 0, 0, 0, 0,
1282 	false, false, MAX_DBG_RESET_REGS, 0
1283 };
1284 
1285 static struct block_defs block_dbg_defs = {
1286 	"dbg", {true, true, true}, false, 0,
1287 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1288 	0, 0, 0, 0, 0,
1289 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1290 };
1291 
1292 static struct block_defs block_nig_defs = {
1293 	"nig",
1294 	{true, true, true}, false, 0,
1295 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1296 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1297 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1298 	NIG_REG_DBG_FORCE_FRAME,
1299 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1300 };
1301 
1302 static struct block_defs block_wol_defs = {
1303 	"wol",
1304 	{false, true, true}, false, 0,
1305 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1306 	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1307 	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1308 	WOL_REG_DBG_FORCE_FRAME_K2_E5,
1309 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1310 };
1311 
1312 static struct block_defs block_bmbn_defs = {
1313 	"bmbn",
1314 	{false, true, true}, false, 0,
1315 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
1316 	 DBG_BUS_CLIENT_RBCB},
1317 	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1318 	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1319 	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1320 	false, false, MAX_DBG_RESET_REGS, 0
1321 };
1322 
1323 static struct block_defs block_ipc_defs = {
1324 	"ipc", {true, true, true}, false, 0,
1325 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1326 	0, 0, 0, 0, 0,
1327 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1328 };
1329 
1330 static struct block_defs block_nwm_defs = {
1331 	"nwm",
1332 	{false, true, true}, false, 0,
1333 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1334 	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1335 	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1336 	NWM_REG_DBG_FORCE_FRAME_K2_E5,
1337 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1338 };
1339 
1340 static struct block_defs block_nws_defs = {
1341 	"nws",
1342 	{false, true, true}, false, 0,
1343 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1344 	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1345 	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1346 	NWS_REG_DBG_FORCE_FRAME_K2_E5,
1347 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1348 };
1349 
1350 static struct block_defs block_ms_defs = {
1351 	"ms",
1352 	{false, true, true}, false, 0,
1353 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1354 	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1355 	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1356 	MS_REG_DBG_FORCE_FRAME_K2_E5,
1357 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1358 };
1359 
1360 static struct block_defs block_phy_pcie_defs = {
1361 	"phy_pcie",
1362 	{false, true, true}, false, 0,
1363 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
1364 	 DBG_BUS_CLIENT_RBCH},
1365 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
1366 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1367 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
1368 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1369 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1370 	false, false, MAX_DBG_RESET_REGS, 0
1371 };
1372 
1373 static struct block_defs block_led_defs = {
1374 	"led", {false, true, true}, false, 0,
1375 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1376 	0, 0, 0, 0, 0,
1377 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1378 };
1379 
1380 static struct block_defs block_avs_wrap_defs = {
1381 	"avs_wrap", {false, true, false}, false, 0,
1382 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1383 	0, 0, 0, 0, 0,
1384 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1385 };
1386 
1387 static struct block_defs block_pxpreqbus_defs = {
1388 	"pxpreqbus", {false, false, false}, false, 0,
1389 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1390 	0, 0, 0, 0, 0,
1391 	false, false, MAX_DBG_RESET_REGS, 0
1392 };
1393 
1394 static struct block_defs block_misc_aeu_defs = {
1395 	"misc_aeu", {true, true, true}, false, 0,
1396 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1397 	0, 0, 0, 0, 0,
1398 	false, false, MAX_DBG_RESET_REGS, 0
1399 };
1400 
1401 static struct block_defs block_bar0_map_defs = {
1402 	"bar0_map", {true, true, true}, false, 0,
1403 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1404 	0, 0, 0, 0, 0,
1405 	false, false, MAX_DBG_RESET_REGS, 0
1406 };
1407 
1408 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1409 	&block_grc_defs,
1410 	&block_miscs_defs,
1411 	&block_misc_defs,
1412 	&block_dbu_defs,
1413 	&block_pglue_b_defs,
1414 	&block_cnig_defs,
1415 	&block_cpmu_defs,
1416 	&block_ncsi_defs,
1417 	&block_opte_defs,
1418 	&block_bmb_defs,
1419 	&block_pcie_defs,
1420 	&block_mcp_defs,
1421 	&block_mcp2_defs,
1422 	&block_pswhst_defs,
1423 	&block_pswhst2_defs,
1424 	&block_pswrd_defs,
1425 	&block_pswrd2_defs,
1426 	&block_pswwr_defs,
1427 	&block_pswwr2_defs,
1428 	&block_pswrq_defs,
1429 	&block_pswrq2_defs,
1430 	&block_pglcs_defs,
1431 	&block_dmae_defs,
1432 	&block_ptu_defs,
1433 	&block_tcm_defs,
1434 	&block_mcm_defs,
1435 	&block_ucm_defs,
1436 	&block_xcm_defs,
1437 	&block_ycm_defs,
1438 	&block_pcm_defs,
1439 	&block_qm_defs,
1440 	&block_tm_defs,
1441 	&block_dorq_defs,
1442 	&block_brb_defs,
1443 	&block_src_defs,
1444 	&block_prs_defs,
1445 	&block_tsdm_defs,
1446 	&block_msdm_defs,
1447 	&block_usdm_defs,
1448 	&block_xsdm_defs,
1449 	&block_ysdm_defs,
1450 	&block_psdm_defs,
1451 	&block_tsem_defs,
1452 	&block_msem_defs,
1453 	&block_usem_defs,
1454 	&block_xsem_defs,
1455 	&block_ysem_defs,
1456 	&block_psem_defs,
1457 	&block_rss_defs,
1458 	&block_tmld_defs,
1459 	&block_muld_defs,
1460 	&block_yuld_defs,
1461 	&block_xyld_defs,
1462 	&block_ptld_defs,
1463 	&block_ypld_defs,
1464 	&block_prm_defs,
1465 	&block_pbf_pb1_defs,
1466 	&block_pbf_pb2_defs,
1467 	&block_rpb_defs,
1468 	&block_btb_defs,
1469 	&block_pbf_defs,
1470 	&block_rdif_defs,
1471 	&block_tdif_defs,
1472 	&block_cdu_defs,
1473 	&block_ccfc_defs,
1474 	&block_tcfc_defs,
1475 	&block_igu_defs,
1476 	&block_cau_defs,
1477 	&block_rgfs_defs,
1478 	&block_rgsrc_defs,
1479 	&block_tgfs_defs,
1480 	&block_tgsrc_defs,
1481 	&block_umac_defs,
1482 	&block_xmac_defs,
1483 	&block_dbg_defs,
1484 	&block_nig_defs,
1485 	&block_wol_defs,
1486 	&block_bmbn_defs,
1487 	&block_ipc_defs,
1488 	&block_nwm_defs,
1489 	&block_nws_defs,
1490 	&block_ms_defs,
1491 	&block_phy_pcie_defs,
1492 	&block_led_defs,
1493 	&block_avs_wrap_defs,
1494 	&block_pxpreqbus_defs,
1495 	&block_misc_aeu_defs,
1496 	&block_bar0_map_defs,
1497 };
1498 
1499 static struct platform_defs s_platform_defs[] = {
1500 	{"asic", 1, 256, 32768},
1501 	{"reserved", 0, 0, 0},
1502 	{"reserved2", 0, 0, 0},
1503 	{"reserved3", 0, 0, 0}
1504 };
1505 
1506 static struct grc_param_defs s_grc_param_defs[] = {
1507 	/* DBG_GRC_PARAM_DUMP_TSTORM */
1508 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1509 
1510 	/* DBG_GRC_PARAM_DUMP_MSTORM */
1511 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1512 
1513 	/* DBG_GRC_PARAM_DUMP_USTORM */
1514 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1515 
1516 	/* DBG_GRC_PARAM_DUMP_XSTORM */
1517 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1518 
1519 	/* DBG_GRC_PARAM_DUMP_YSTORM */
1520 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1521 
1522 	/* DBG_GRC_PARAM_DUMP_PSTORM */
1523 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1524 
1525 	/* DBG_GRC_PARAM_DUMP_REGS */
1526 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1527 
1528 	/* DBG_GRC_PARAM_DUMP_RAM */
1529 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1530 
1531 	/* DBG_GRC_PARAM_DUMP_PBUF */
1532 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1533 
1534 	/* DBG_GRC_PARAM_DUMP_IOR */
1535 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1536 
1537 	/* DBG_GRC_PARAM_DUMP_VFC */
1538 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1539 
1540 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1541 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1542 
1543 	/* DBG_GRC_PARAM_DUMP_ILT */
1544 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1545 
1546 	/* DBG_GRC_PARAM_DUMP_RSS */
1547 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1548 
1549 	/* DBG_GRC_PARAM_DUMP_CAU */
1550 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1551 
1552 	/* DBG_GRC_PARAM_DUMP_QM */
1553 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1554 
1555 	/* DBG_GRC_PARAM_DUMP_MCP */
1556 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1557 
1558 	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
1559 	{{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
1560 
1561 	/* DBG_GRC_PARAM_DUMP_CFC */
1562 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1563 
1564 	/* DBG_GRC_PARAM_DUMP_IGU */
1565 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1566 
1567 	/* DBG_GRC_PARAM_DUMP_BRB */
1568 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1569 
1570 	/* DBG_GRC_PARAM_DUMP_BTB */
1571 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1572 
1573 	/* DBG_GRC_PARAM_DUMP_BMB */
1574 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1575 
1576 	/* DBG_GRC_PARAM_DUMP_NIG */
1577 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1578 
1579 	/* DBG_GRC_PARAM_DUMP_MULD */
1580 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1581 
1582 	/* DBG_GRC_PARAM_DUMP_PRS */
1583 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1584 
1585 	/* DBG_GRC_PARAM_DUMP_DMAE */
1586 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1587 
1588 	/* DBG_GRC_PARAM_DUMP_TM */
1589 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1590 
1591 	/* DBG_GRC_PARAM_DUMP_SDM */
1592 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1593 
1594 	/* DBG_GRC_PARAM_DUMP_DIF */
1595 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1596 
1597 	/* DBG_GRC_PARAM_DUMP_STATIC */
1598 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1599 
1600 	/* DBG_GRC_PARAM_UNSTALL */
1601 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1602 
1603 	/* DBG_GRC_PARAM_NUM_LCIDS */
1604 	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
1605 	 MAX_LCIDS, MAX_LCIDS},
1606 
1607 	/* DBG_GRC_PARAM_NUM_LTIDS */
1608 	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
1609 	 MAX_LTIDS, MAX_LTIDS},
1610 
1611 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1612 	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1613 
1614 	/* DBG_GRC_PARAM_CRASH */
1615 	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1616 
1617 	/* DBG_GRC_PARAM_PARITY_SAFE */
1618 	{{0, 0, 0}, 0, 1, false, false, 1, 0},
1619 
1620 	/* DBG_GRC_PARAM_DUMP_CM */
1621 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1622 
1623 	/* DBG_GRC_PARAM_DUMP_PHY */
1624 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1625 
1626 	/* DBG_GRC_PARAM_NO_MCP */
1627 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1628 
1629 	/* DBG_GRC_PARAM_NO_FW_VER */
1630 	{{0, 0, 0}, 0, 1, false, false, 0, 0}
1631 };
1632 
1633 static struct rss_mem_defs s_rss_mem_defs[] = {
1634 	{ "rss_mem_cid", "rss_cid", 0, 32,
1635 	  {256, 320, 512} },
1636 
1637 	{ "rss_mem_key_msb", "rss_key", 1024, 256,
1638 	  {128, 208, 257} },
1639 
1640 	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
1641 	  {128, 208, 257} },
1642 
1643 	{ "rss_mem_info", "rss_info", 3072, 16,
1644 	  {128, 208, 256} },
1645 
1646 	{ "rss_mem_ind", "rss_ind", 4096, 16,
1647 	  {16384, 26624, 32768} }
1648 };
1649 
1650 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1651 	{"vfc_ram_tt1", "vfc_ram", 0, 512},
1652 	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
1653 	{"vfc_ram_stt2", "vfc_ram", 640, 32},
1654 	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1655 };
1656 
1657 static struct big_ram_defs s_big_ram_defs[] = {
1658 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1659 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1660 	  MISC_REG_BLOCK_256B_EN, {0, 0, 0},
1661 	  {153600, 180224, 282624} },
1662 
1663 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1664 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1665 	  MISC_REG_BLOCK_256B_EN, {0, 1, 1},
1666 	  {92160, 117760, 168960} },
1667 
1668 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1669 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1670 	  MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
1671 	  {36864, 36864, 36864} }
1672 };
1673 
1674 static struct reset_reg_defs s_reset_regs_defs[] = {
1675 	/* DBG_RESET_REG_MISCS_PL_UA */
1676 	{ MISCS_REG_RESET_PL_UA,
1677 	  {true, true, true}, {0x0, 0x0, 0x0} },
1678 
1679 	/* DBG_RESET_REG_MISCS_PL_HV */
1680 	{ MISCS_REG_RESET_PL_HV,
1681 	  {true, true, true}, {0x0, 0x400, 0x600} },
1682 
1683 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1684 	{ MISCS_REG_RESET_PL_HV_2_K2_E5,
1685 	  {false, true, true}, {0x0, 0x0, 0x0} },
1686 
1687 	/* DBG_RESET_REG_MISC_PL_UA */
1688 	{ MISC_REG_RESET_PL_UA,
1689 	  {true, true, true}, {0x0, 0x0, 0x0} },
1690 
1691 	/* DBG_RESET_REG_MISC_PL_HV */
1692 	{ MISC_REG_RESET_PL_HV,
1693 	  {true, true, true}, {0x0, 0x0, 0x0} },
1694 
1695 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1696 	{ MISC_REG_RESET_PL_PDA_VMAIN_1,
1697 	  {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
1698 
1699 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1700 	{ MISC_REG_RESET_PL_PDA_VMAIN_2,
1701 	  {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
1702 
1703 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1704 	{ MISC_REG_RESET_PL_PDA_VAUX,
1705 	  {true, true, true}, {0x2, 0x2, 0x2} },
1706 };
1707 
1708 static struct phy_defs s_phy_defs[] = {
1709 	{"nw_phy", NWS_REG_NWS_CMU_K2,
1710 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
1711 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
1712 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
1713 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
1714 	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
1715 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1716 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1717 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1718 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1719 	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
1720 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1721 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1722 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1723 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1724 	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
1725 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1726 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1727 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1728 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1729 };
1730 
1731 static struct split_type_defs s_split_type_defs[] = {
1732 	/* SPLIT_TYPE_NONE */
1733 	{"eng"},
1734 
1735 	/* SPLIT_TYPE_PORT */
1736 	{"port"},
1737 
1738 	/* SPLIT_TYPE_PF */
1739 	{"pf"},
1740 
1741 	/* SPLIT_TYPE_PORT_PF */
1742 	{"port"},
1743 
1744 	/* SPLIT_TYPE_VF */
1745 	{"vf"}
1746 };
1747 
1748 /**************************** Private Functions ******************************/
1749 
1750 /* Reads and returns a single dword from the specified unaligned buffer */
1751 static u32 qed_read_unaligned_dword(u8 *buf)
1752 {
1753 	u32 dword;
1754 
1755 	memcpy((u8 *)&dword, buf, sizeof(dword));
1756 	return dword;
1757 }
1758 
1759 /* Sets the value of the specified GRC param */
1760 static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
1761 			      enum dbg_grc_params grc_param, u32 val)
1762 {
1763 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1764 
1765 	dev_data->grc.param_val[grc_param] = val;
1766 }
1767 
1768 /* Returns the value of the specified GRC param */
1769 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1770 			     enum dbg_grc_params grc_param)
1771 {
1772 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1773 
1774 	return dev_data->grc.param_val[grc_param];
1775 }
1776 
1777 /* Initializes the GRC parameters */
1778 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1779 {
1780 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1781 
1782 	if (!dev_data->grc.params_initialized) {
1783 		qed_dbg_grc_set_params_default(p_hwfn);
1784 		dev_data->grc.params_initialized = 1;
1785 	}
1786 }
1787 
1788 /* Initializes debug data for the specified device */
1789 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1790 					struct qed_ptt *p_ptt)
1791 {
1792 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1793 	u8 num_pfs = 0, max_pfs_per_port = 0;
1794 
1795 	if (dev_data->initialized)
1796 		return DBG_STATUS_OK;
1797 
1798 	/* Set chip */
1799 	if (QED_IS_K2(p_hwfn->cdev)) {
1800 		dev_data->chip_id = CHIP_K2;
1801 		dev_data->mode_enable[MODE_K2] = 1;
1802 		dev_data->num_vfs = MAX_NUM_VFS_K2;
1803 		num_pfs = MAX_NUM_PFS_K2;
1804 		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
1805 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1806 		dev_data->chip_id = CHIP_BB;
1807 		dev_data->mode_enable[MODE_BB] = 1;
1808 		dev_data->num_vfs = MAX_NUM_VFS_BB;
1809 		num_pfs = MAX_NUM_PFS_BB;
1810 		max_pfs_per_port = MAX_NUM_PFS_BB;
1811 	} else {
1812 		return DBG_STATUS_UNKNOWN_CHIP;
1813 	}
1814 
1815 	/* Set platofrm */
1816 	dev_data->platform_id = PLATFORM_ASIC;
1817 	dev_data->mode_enable[MODE_ASIC] = 1;
1818 
1819 	/* Set port mode */
1820 	switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
1821 	case 0:
1822 		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
1823 		break;
1824 	case 1:
1825 		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
1826 		break;
1827 	case 2:
1828 		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
1829 		break;
1830 	}
1831 
1832 	/* Set 100G mode */
1833 	if (dev_data->chip_id == CHIP_BB &&
1834 	    qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
1835 		dev_data->mode_enable[MODE_100G] = 1;
1836 
1837 	/* Set number of ports */
1838 	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
1839 	    dev_data->mode_enable[MODE_100G])
1840 		dev_data->num_ports = 1;
1841 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
1842 		dev_data->num_ports = 2;
1843 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
1844 		dev_data->num_ports = 4;
1845 
1846 	/* Set number of PFs per port */
1847 	dev_data->num_pfs_per_port = min_t(u32,
1848 					   num_pfs / dev_data->num_ports,
1849 					   max_pfs_per_port);
1850 
1851 	/* Initializes the GRC parameters */
1852 	qed_dbg_grc_init_params(p_hwfn);
1853 
1854 	dev_data->use_dmae = true;
1855 	dev_data->initialized = 1;
1856 
1857 	return DBG_STATUS_OK;
1858 }
1859 
1860 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1861 						    enum block_id block_id)
1862 {
1863 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1864 
1865 	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1866 						       MAX_CHIP_IDS +
1867 						       dev_data->chip_id];
1868 }
1869 
1870 /* Reads the FW info structure for the specified Storm from the chip,
1871  * and writes it to the specified fw_info pointer.
1872  */
1873 static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
1874 				   struct qed_ptt *p_ptt,
1875 				   u8 storm_id, struct fw_info *fw_info)
1876 {
1877 	struct storm_defs *storm = &s_storm_defs[storm_id];
1878 	struct fw_info_location fw_info_location;
1879 	u32 addr, i, *dest;
1880 
1881 	memset(&fw_info_location, 0, sizeof(fw_info_location));
1882 	memset(fw_info, 0, sizeof(*fw_info));
1883 
1884 	/* Read first the address that points to fw_info location.
1885 	 * The address is located in the last line of the Storm RAM.
1886 	 */
1887 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1888 	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
1889 	       sizeof(fw_info_location);
1890 	dest = (u32 *)&fw_info_location;
1891 
1892 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1893 	     i++, addr += BYTES_IN_DWORD)
1894 		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1895 
1896 	/* Read FW version info from Storm RAM */
1897 	if (fw_info_location.size > 0 && fw_info_location.size <=
1898 	    sizeof(*fw_info)) {
1899 		addr = fw_info_location.grc_addr;
1900 		dest = (u32 *)fw_info;
1901 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1902 		     i++, addr += BYTES_IN_DWORD)
1903 			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1904 	}
1905 }
1906 
1907 /* Dumps the specified string to the specified buffer.
1908  * Returns the dumped size in bytes.
1909  */
1910 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1911 {
1912 	if (dump)
1913 		strcpy(dump_buf, str);
1914 
1915 	return (u32)strlen(str) + 1;
1916 }
1917 
1918 /* Dumps zeros to align the specified buffer to dwords.
1919  * Returns the dumped size in bytes.
1920  */
1921 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1922 {
1923 	u8 offset_in_dword, align_size;
1924 
1925 	offset_in_dword = (u8)(byte_offset & 0x3);
1926 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1927 
1928 	if (dump && align_size)
1929 		memset(dump_buf, 0, align_size);
1930 
1931 	return align_size;
1932 }
1933 
1934 /* Writes the specified string param to the specified buffer.
1935  * Returns the dumped size in dwords.
1936  */
1937 static u32 qed_dump_str_param(u32 *dump_buf,
1938 			      bool dump,
1939 			      const char *param_name, const char *param_val)
1940 {
1941 	char *char_buf = (char *)dump_buf;
1942 	u32 offset = 0;
1943 
1944 	/* Dump param name */
1945 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1946 
1947 	/* Indicate a string param value */
1948 	if (dump)
1949 		*(char_buf + offset) = 1;
1950 	offset++;
1951 
1952 	/* Dump param value */
1953 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1954 
1955 	/* Align buffer to next dword */
1956 	offset += qed_dump_align(char_buf + offset, dump, offset);
1957 
1958 	return BYTES_TO_DWORDS(offset);
1959 }
1960 
1961 /* Writes the specified numeric param to the specified buffer.
1962  * Returns the dumped size in dwords.
1963  */
1964 static u32 qed_dump_num_param(u32 *dump_buf,
1965 			      bool dump, const char *param_name, u32 param_val)
1966 {
1967 	char *char_buf = (char *)dump_buf;
1968 	u32 offset = 0;
1969 
1970 	/* Dump param name */
1971 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1972 
1973 	/* Indicate a numeric param value */
1974 	if (dump)
1975 		*(char_buf + offset) = 0;
1976 	offset++;
1977 
1978 	/* Align buffer to next dword */
1979 	offset += qed_dump_align(char_buf + offset, dump, offset);
1980 
1981 	/* Dump param value (and change offset from bytes to dwords) */
1982 	offset = BYTES_TO_DWORDS(offset);
1983 	if (dump)
1984 		*(dump_buf + offset) = param_val;
1985 	offset++;
1986 
1987 	return offset;
1988 }
1989 
1990 /* Reads the FW version and writes it as a param to the specified buffer.
1991  * Returns the dumped size in dwords.
1992  */
1993 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1994 				 struct qed_ptt *p_ptt,
1995 				 u32 *dump_buf, bool dump)
1996 {
1997 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1998 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1999 	struct fw_info fw_info = { {0}, {0} };
2000 	u32 offset = 0;
2001 
2002 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2003 		/* Read FW info from chip */
2004 		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
2005 
2006 		/* Create FW version/image strings */
2007 		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
2008 			     "%d_%d_%d_%d", fw_info.ver.num.major,
2009 			     fw_info.ver.num.minor, fw_info.ver.num.rev,
2010 			     fw_info.ver.num.eng) < 0)
2011 			DP_NOTICE(p_hwfn,
2012 				  "Unexpected debug error: invalid FW version string\n");
2013 		switch (fw_info.ver.image_id) {
2014 		case FW_IMG_MAIN:
2015 			strcpy(fw_img_str, "main");
2016 			break;
2017 		default:
2018 			strcpy(fw_img_str, "unknown");
2019 			break;
2020 		}
2021 	}
2022 
2023 	/* Dump FW version, image and timestamp */
2024 	offset += qed_dump_str_param(dump_buf + offset,
2025 				     dump, "fw-version", fw_ver_str);
2026 	offset += qed_dump_str_param(dump_buf + offset,
2027 				     dump, "fw-image", fw_img_str);
2028 	offset += qed_dump_num_param(dump_buf + offset,
2029 				     dump,
2030 				     "fw-timestamp", fw_info.ver.timestamp);
2031 
2032 	return offset;
2033 }
2034 
2035 /* Reads the MFW version and writes it as a param to the specified buffer.
2036  * Returns the dumped size in dwords.
2037  */
2038 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
2039 				  struct qed_ptt *p_ptt,
2040 				  u32 *dump_buf, bool dump)
2041 {
2042 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2043 
2044 	if (dump &&
2045 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2046 		u32 global_section_offsize, global_section_addr, mfw_ver;
2047 		u32 public_data_addr, global_section_offsize_addr;
2048 
2049 		/* Find MCP public data GRC address. Needs to be ORed with
2050 		 * MCP_REG_SCRATCH due to a HW bug.
2051 		 */
2052 		public_data_addr = qed_rd(p_hwfn,
2053 					  p_ptt,
2054 					  MISC_REG_SHARED_MEM_ADDR) |
2055 				   MCP_REG_SCRATCH;
2056 
2057 		/* Find MCP public global section offset */
2058 		global_section_offsize_addr = public_data_addr +
2059 					      offsetof(struct mcp_public_data,
2060 						       sections) +
2061 					      sizeof(offsize_t) * PUBLIC_GLOBAL;
2062 		global_section_offsize = qed_rd(p_hwfn, p_ptt,
2063 						global_section_offsize_addr);
2064 		global_section_addr =
2065 			MCP_REG_SCRATCH +
2066 			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2067 
2068 		/* Read MFW version from MCP public global section */
2069 		mfw_ver = qed_rd(p_hwfn, p_ptt,
2070 				 global_section_addr +
2071 				 offsetof(struct public_global, mfw_ver));
2072 
2073 		/* Dump MFW version param */
2074 		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2075 			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2076 			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2077 			DP_NOTICE(p_hwfn,
2078 				  "Unexpected debug error: invalid MFW version string\n");
2079 	}
2080 
2081 	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2082 }
2083 
2084 /* Writes a section header to the specified buffer.
2085  * Returns the dumped size in dwords.
2086  */
2087 static u32 qed_dump_section_hdr(u32 *dump_buf,
2088 				bool dump, const char *name, u32 num_params)
2089 {
2090 	return qed_dump_num_param(dump_buf, dump, name, num_params);
2091 }
2092 
2093 /* Writes the common global params to the specified buffer.
2094  * Returns the dumped size in dwords.
2095  */
2096 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2097 					 struct qed_ptt *p_ptt,
2098 					 u32 *dump_buf,
2099 					 bool dump,
2100 					 u8 num_specific_global_params)
2101 {
2102 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2103 	u32 offset = 0;
2104 	u8 num_params;
2105 
2106 	/* Dump global params section header */
2107 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2108 	offset += qed_dump_section_hdr(dump_buf + offset,
2109 				       dump, "global_params", num_params);
2110 
2111 	/* Store params */
2112 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2113 	offset += qed_dump_mfw_ver_param(p_hwfn,
2114 					 p_ptt, dump_buf + offset, dump);
2115 	offset += qed_dump_num_param(dump_buf + offset,
2116 				     dump, "tools-version", TOOLS_VERSION);
2117 	offset += qed_dump_str_param(dump_buf + offset,
2118 				     dump,
2119 				     "chip",
2120 				     s_chip_defs[dev_data->chip_id].name);
2121 	offset += qed_dump_str_param(dump_buf + offset,
2122 				     dump,
2123 				     "platform",
2124 				     s_platform_defs[dev_data->platform_id].
2125 				     name);
2126 	offset +=
2127 	    qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2128 			       p_hwfn->abs_pf_id);
2129 
2130 	return offset;
2131 }
2132 
2133 /* Writes the "last" section (including CRC) to the specified buffer at the
2134  * given offset. Returns the dumped size in dwords.
2135  */
2136 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2137 {
2138 	u32 start_offset = offset;
2139 
2140 	/* Dump CRC section header */
2141 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2142 
2143 	/* Calculate CRC32 and add it to the dword after the "last" section */
2144 	if (dump)
2145 		*(dump_buf + offset) = ~crc32(0xffffffff,
2146 					      (u8 *)dump_buf,
2147 					      DWORDS_TO_BYTES(offset));
2148 
2149 	offset++;
2150 
2151 	return offset - start_offset;
2152 }
2153 
2154 /* Update blocks reset state  */
2155 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2156 					  struct qed_ptt *p_ptt)
2157 {
2158 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2159 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2160 	u32 i;
2161 
2162 	/* Read reset registers */
2163 	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2164 		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2165 			reg_val[i] = qed_rd(p_hwfn,
2166 					    p_ptt, s_reset_regs_defs[i].addr);
2167 
2168 	/* Check if blocks are in reset */
2169 	for (i = 0; i < MAX_BLOCK_ID; i++) {
2170 		struct block_defs *block = s_block_defs[i];
2171 
2172 		dev_data->block_in_reset[i] = block->has_reset_bit &&
2173 		    !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2174 	}
2175 }
2176 
2177 /* Enable / disable the Debug block */
2178 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2179 				     struct qed_ptt *p_ptt, bool enable)
2180 {
2181 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2182 }
2183 
2184 /* Resets the Debug block */
2185 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2186 				    struct qed_ptt *p_ptt)
2187 {
2188 	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2189 	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2190 
2191 	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2192 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2193 	new_reset_reg_val =
2194 	    old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2195 
2196 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2197 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2198 }
2199 
2200 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2201 				     struct qed_ptt *p_ptt,
2202 				     enum dbg_bus_frame_modes mode)
2203 {
2204 	qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2205 }
2206 
2207 /* Enable / disable Debug Bus clients according to the specified mask
2208  * (1 = enable, 0 = disable).
2209  */
2210 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2211 				   struct qed_ptt *p_ptt, u32 client_mask)
2212 {
2213 	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2214 }
2215 
2216 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2217 {
2218 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2219 	bool arg1, arg2;
2220 	const u32 *ptr;
2221 	u8 tree_val;
2222 
2223 	/* Get next element from modes tree buffer */
2224 	ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2225 	tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2226 
2227 	switch (tree_val) {
2228 	case INIT_MODE_OP_NOT:
2229 		return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2230 	case INIT_MODE_OP_OR:
2231 	case INIT_MODE_OP_AND:
2232 		arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2233 		arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2234 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2235 							arg2) : (arg1 && arg2);
2236 	default:
2237 		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2238 	}
2239 }
2240 
2241 /* Returns true if the specified entity (indicated by GRC param) should be
2242  * included in the dump, false otherwise.
2243  */
2244 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2245 				enum dbg_grc_params grc_param)
2246 {
2247 	return qed_grc_get_param(p_hwfn, grc_param) > 0;
2248 }
2249 
2250 /* Returns true of the specified Storm should be included in the dump, false
2251  * otherwise.
2252  */
2253 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2254 				      enum dbg_storms storm)
2255 {
2256 	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2257 }
2258 
2259 /* Returns true if the specified memory should be included in the dump, false
2260  * otherwise.
2261  */
2262 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2263 				    enum block_id block_id, u8 mem_group_id)
2264 {
2265 	struct block_defs *block = s_block_defs[block_id];
2266 	u8 i;
2267 
2268 	/* Check Storm match */
2269 	if (block->associated_to_storm &&
2270 	    !qed_grc_is_storm_included(p_hwfn,
2271 				       (enum dbg_storms)block->storm_id))
2272 		return false;
2273 
2274 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2275 		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2276 
2277 		if (mem_group_id == big_ram->mem_group_id ||
2278 		    mem_group_id == big_ram->ram_mem_group_id)
2279 			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2280 	}
2281 
2282 	switch (mem_group_id) {
2283 	case MEM_GROUP_PXP_ILT:
2284 	case MEM_GROUP_PXP_MEM:
2285 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2286 	case MEM_GROUP_RAM:
2287 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2288 	case MEM_GROUP_PBUF:
2289 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2290 	case MEM_GROUP_CAU_MEM:
2291 	case MEM_GROUP_CAU_SB:
2292 	case MEM_GROUP_CAU_PI:
2293 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2294 	case MEM_GROUP_QM_MEM:
2295 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2296 	case MEM_GROUP_CFC_MEM:
2297 	case MEM_GROUP_CONN_CFC_MEM:
2298 	case MEM_GROUP_TASK_CFC_MEM:
2299 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
2300 		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2301 	case MEM_GROUP_IGU_MEM:
2302 	case MEM_GROUP_IGU_MSIX:
2303 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2304 	case MEM_GROUP_MULD_MEM:
2305 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2306 	case MEM_GROUP_PRS_MEM:
2307 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2308 	case MEM_GROUP_DMAE_MEM:
2309 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2310 	case MEM_GROUP_TM_MEM:
2311 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2312 	case MEM_GROUP_SDM_MEM:
2313 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2314 	case MEM_GROUP_TDIF_CTX:
2315 	case MEM_GROUP_RDIF_CTX:
2316 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2317 	case MEM_GROUP_CM_MEM:
2318 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2319 	case MEM_GROUP_IOR:
2320 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2321 	default:
2322 		return true;
2323 	}
2324 }
2325 
2326 /* Stalls all Storms */
2327 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2328 				 struct qed_ptt *p_ptt, bool stall)
2329 {
2330 	u32 reg_addr;
2331 	u8 storm_id;
2332 
2333 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2334 		if (!qed_grc_is_storm_included(p_hwfn,
2335 					       (enum dbg_storms)storm_id))
2336 			continue;
2337 
2338 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2339 		    SEM_FAST_REG_STALL_0_BB_K2;
2340 		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2341 	}
2342 
2343 	msleep(STALL_DELAY_MS);
2344 }
2345 
2346 /* Takes all blocks out of reset */
2347 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2348 				   struct qed_ptt *p_ptt)
2349 {
2350 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2351 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2352 	u32 block_id, i;
2353 
2354 	/* Fill reset regs values */
2355 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2356 		struct block_defs *block = s_block_defs[block_id];
2357 
2358 		if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
2359 		    block->unreset)
2360 			reg_val[block->reset_reg] |=
2361 			    BIT(block->reset_bit_offset);
2362 	}
2363 
2364 	/* Write reset registers */
2365 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2366 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2367 			continue;
2368 
2369 		reg_val[i] |=
2370 			s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2371 
2372 		if (reg_val[i])
2373 			qed_wr(p_hwfn,
2374 			       p_ptt,
2375 			       s_reset_regs_defs[i].addr +
2376 			       RESET_REG_UNRESET_OFFSET, reg_val[i]);
2377 	}
2378 }
2379 
2380 /* Returns the attention block data of the specified block */
2381 static const struct dbg_attn_block_type_data *
2382 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2383 {
2384 	const struct dbg_attn_block *base_attn_block_arr =
2385 		(const struct dbg_attn_block *)
2386 		s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2387 
2388 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2389 }
2390 
2391 /* Returns the attention registers of the specified block */
2392 static const struct dbg_attn_reg *
2393 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2394 			u8 *num_attn_regs)
2395 {
2396 	const struct dbg_attn_block_type_data *block_type_data =
2397 		qed_get_block_attn_data(block_id, attn_type);
2398 
2399 	*num_attn_regs = block_type_data->num_regs;
2400 
2401 	return &((const struct dbg_attn_reg *)
2402 		 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2403 							  regs_offset];
2404 }
2405 
2406 /* For each block, clear the status of all parities */
2407 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2408 				   struct qed_ptt *p_ptt)
2409 {
2410 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2411 	const struct dbg_attn_reg *attn_reg_arr;
2412 	u8 reg_idx, num_attn_regs;
2413 	u32 block_id;
2414 
2415 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2416 		if (dev_data->block_in_reset[block_id])
2417 			continue;
2418 
2419 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2420 						       ATTN_TYPE_PARITY,
2421 						       &num_attn_regs);
2422 
2423 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2424 			const struct dbg_attn_reg *reg_data =
2425 				&attn_reg_arr[reg_idx];
2426 			u16 modes_buf_offset;
2427 			bool eval_mode;
2428 
2429 			/* Check mode */
2430 			eval_mode = GET_FIELD(reg_data->mode.data,
2431 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2432 			modes_buf_offset =
2433 				GET_FIELD(reg_data->mode.data,
2434 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2435 
2436 			/* If Mode match: clear parity status */
2437 			if (!eval_mode ||
2438 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
2439 				qed_rd(p_hwfn, p_ptt,
2440 				       DWORDS_TO_BYTES(reg_data->
2441 						       sts_clr_address));
2442 		}
2443 	}
2444 }
2445 
2446 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2447  * The following parameters are dumped:
2448  * - count: no. of dumped entries
2449  * - split_type: split type
2450  * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
2451  * - param_name: user parameter value (dumped only if param_name != NULL
2452  *		 and param_val != NULL).
2453  */
2454 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2455 				 bool dump,
2456 				 u32 num_reg_entries,
2457 				 enum init_split_types split_type,
2458 				 u8 split_id,
2459 				 const char *param_name, const char *param_val)
2460 {
2461 	u8 num_params = 2 +
2462 	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
2463 	u32 offset = 0;
2464 
2465 	offset += qed_dump_section_hdr(dump_buf + offset,
2466 				       dump, "grc_regs", num_params);
2467 	offset += qed_dump_num_param(dump_buf + offset,
2468 				     dump, "count", num_reg_entries);
2469 	offset += qed_dump_str_param(dump_buf + offset,
2470 				     dump, "split",
2471 				     s_split_type_defs[split_type].name);
2472 	if (split_type != SPLIT_TYPE_NONE)
2473 		offset += qed_dump_num_param(dump_buf + offset,
2474 					     dump, "id", split_id);
2475 	if (param_name && param_val)
2476 		offset += qed_dump_str_param(dump_buf + offset,
2477 					     dump, param_name, param_val);
2478 
2479 	return offset;
2480 }
2481 
2482 /* Reads the specified registers into the specified buffer.
2483  * The addr and len arguments are specified in dwords.
2484  */
2485 void qed_read_regs(struct qed_hwfn *p_hwfn,
2486 		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
2487 {
2488 	u32 i;
2489 
2490 	for (i = 0; i < len; i++)
2491 		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2492 }
2493 
2494 /* Dumps the GRC registers in the specified address range.
2495  * Returns the dumped size in dwords.
2496  * The addr and len arguments are specified in dwords.
2497  */
2498 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2499 				   struct qed_ptt *p_ptt,
2500 				   u32 *dump_buf,
2501 				   bool dump, u32 addr, u32 len, bool wide_bus,
2502 				   enum init_split_types split_type,
2503 				   u8 split_id)
2504 {
2505 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2506 	u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
2507 
2508 	if (!dump)
2509 		return len;
2510 
2511 	/* Print log if needed */
2512 	dev_data->num_regs_read += len;
2513 	if (dev_data->num_regs_read >=
2514 	    s_platform_defs[dev_data->platform_id].log_thresh) {
2515 		DP_VERBOSE(p_hwfn,
2516 			   QED_MSG_DEBUG,
2517 			   "Dumping %d registers...\n",
2518 			   dev_data->num_regs_read);
2519 		dev_data->num_regs_read = 0;
2520 	}
2521 
2522 	switch (split_type) {
2523 	case SPLIT_TYPE_PORT:
2524 		port_id = split_id;
2525 		break;
2526 	case SPLIT_TYPE_PF:
2527 		pf_id = split_id;
2528 		break;
2529 	case SPLIT_TYPE_PORT_PF:
2530 		port_id = split_id / dev_data->num_pfs_per_port;
2531 		pf_id = port_id + dev_data->num_ports *
2532 		    (split_id % dev_data->num_pfs_per_port);
2533 		break;
2534 	case SPLIT_TYPE_VF:
2535 		vf_id = split_id;
2536 		break;
2537 	default:
2538 		break;
2539 	}
2540 
2541 	/* Try reading using DMAE */
2542 	if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
2543 	    (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
2544 	     wide_bus)) {
2545 		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
2546 				       (u64)(uintptr_t)(dump_buf), len, NULL))
2547 			return len;
2548 		dev_data->use_dmae = 0;
2549 		DP_VERBOSE(p_hwfn,
2550 			   QED_MSG_DEBUG,
2551 			   "Failed reading from chip using DMAE, using GRC instead\n");
2552 	}
2553 
2554 	/* If not read using DMAE, read using GRC */
2555 
2556 	/* Set pretend */
2557 	if (split_type != dev_data->pretend.split_type || split_id !=
2558 	    dev_data->pretend.split_id) {
2559 		switch (split_type) {
2560 		case SPLIT_TYPE_PORT:
2561 			qed_port_pretend(p_hwfn, p_ptt, port_id);
2562 			break;
2563 		case SPLIT_TYPE_PF:
2564 			fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2565 			qed_fid_pretend(p_hwfn, p_ptt, fid);
2566 			break;
2567 		case SPLIT_TYPE_PORT_PF:
2568 			fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2569 			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
2570 			break;
2571 		case SPLIT_TYPE_VF:
2572 			fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
2573 			      (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
2574 			qed_fid_pretend(p_hwfn, p_ptt, fid);
2575 			break;
2576 		default:
2577 			break;
2578 		}
2579 
2580 		dev_data->pretend.split_type = (u8)split_type;
2581 		dev_data->pretend.split_id = split_id;
2582 	}
2583 
2584 	/* Read registers using GRC */
2585 	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2586 
2587 	return len;
2588 }
2589 
2590 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2591  * The addr and len arguments are specified in dwords.
2592  */
2593 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2594 				      bool dump, u32 addr, u32 len)
2595 {
2596 	if (dump)
2597 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2598 
2599 	return 1;
2600 }
2601 
2602 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2603  * The addr and len arguments are specified in dwords.
2604  */
2605 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2606 				  struct qed_ptt *p_ptt,
2607 				  u32 *dump_buf,
2608 				  bool dump, u32 addr, u32 len, bool wide_bus,
2609 				  enum init_split_types split_type, u8 split_id)
2610 {
2611 	u32 offset = 0;
2612 
2613 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2614 	offset += qed_grc_dump_addr_range(p_hwfn,
2615 					  p_ptt,
2616 					  dump_buf + offset,
2617 					  dump, addr, len, wide_bus,
2618 					  split_type, split_id);
2619 
2620 	return offset;
2621 }
2622 
2623 /* Dumps GRC registers sequence with skip cycle.
2624  * Returns the dumped size in dwords.
2625  * - addr:	start GRC address in dwords
2626  * - total_len:	total no. of dwords to dump
2627  * - read_len:	no. consecutive dwords to read
2628  * - skip_len:	no. of dwords to skip (and fill with zeros)
2629  */
2630 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2631 				       struct qed_ptt *p_ptt,
2632 				       u32 *dump_buf,
2633 				       bool dump,
2634 				       u32 addr,
2635 				       u32 total_len,
2636 				       u32 read_len, u32 skip_len)
2637 {
2638 	u32 offset = 0, reg_offset = 0;
2639 
2640 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2641 
2642 	if (!dump)
2643 		return offset + total_len;
2644 
2645 	while (reg_offset < total_len) {
2646 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2647 
2648 		offset += qed_grc_dump_addr_range(p_hwfn,
2649 						  p_ptt,
2650 						  dump_buf + offset,
2651 						  dump,  addr, curr_len, false,
2652 						  SPLIT_TYPE_NONE, 0);
2653 		reg_offset += curr_len;
2654 		addr += curr_len;
2655 
2656 		if (reg_offset < total_len) {
2657 			curr_len = min_t(u32, skip_len, total_len - skip_len);
2658 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2659 			offset += curr_len;
2660 			reg_offset += curr_len;
2661 			addr += curr_len;
2662 		}
2663 	}
2664 
2665 	return offset;
2666 }
2667 
2668 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2669 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2670 				     struct qed_ptt *p_ptt,
2671 				     struct dbg_array input_regs_arr,
2672 				     u32 *dump_buf,
2673 				     bool dump,
2674 				     enum init_split_types split_type,
2675 				     u8 split_id,
2676 				     bool block_enable[MAX_BLOCK_ID],
2677 				     u32 *num_dumped_reg_entries)
2678 {
2679 	u32 i, offset = 0, input_offset = 0;
2680 	bool mode_match = true;
2681 
2682 	*num_dumped_reg_entries = 0;
2683 
2684 	while (input_offset < input_regs_arr.size_in_dwords) {
2685 		const struct dbg_dump_cond_hdr *cond_hdr =
2686 		    (const struct dbg_dump_cond_hdr *)
2687 		    &input_regs_arr.ptr[input_offset++];
2688 		u16 modes_buf_offset;
2689 		bool eval_mode;
2690 
2691 		/* Check mode/block */
2692 		eval_mode = GET_FIELD(cond_hdr->mode.data,
2693 				      DBG_MODE_HDR_EVAL_MODE) > 0;
2694 		if (eval_mode) {
2695 			modes_buf_offset =
2696 				GET_FIELD(cond_hdr->mode.data,
2697 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2698 			mode_match = qed_is_mode_match(p_hwfn,
2699 						       &modes_buf_offset);
2700 		}
2701 
2702 		if (!mode_match || !block_enable[cond_hdr->block_id]) {
2703 			input_offset += cond_hdr->data_size;
2704 			continue;
2705 		}
2706 
2707 		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2708 			const struct dbg_dump_reg *reg =
2709 			    (const struct dbg_dump_reg *)
2710 			    &input_regs_arr.ptr[input_offset];
2711 			u32 addr, len;
2712 			bool wide_bus;
2713 
2714 			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2715 			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2716 			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2717 			offset += qed_grc_dump_reg_entry(p_hwfn,
2718 							 p_ptt,
2719 							 dump_buf + offset,
2720 							 dump,
2721 							 addr,
2722 							 len,
2723 							 wide_bus,
2724 							 split_type, split_id);
2725 			(*num_dumped_reg_entries)++;
2726 		}
2727 	}
2728 
2729 	return offset;
2730 }
2731 
2732 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2733 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2734 				   struct qed_ptt *p_ptt,
2735 				   struct dbg_array input_regs_arr,
2736 				   u32 *dump_buf,
2737 				   bool dump,
2738 				   bool block_enable[MAX_BLOCK_ID],
2739 				   enum init_split_types split_type,
2740 				   u8 split_id,
2741 				   const char *param_name,
2742 				   const char *param_val)
2743 {
2744 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2745 	enum init_split_types hdr_split_type = split_type;
2746 	u32 num_dumped_reg_entries, offset;
2747 	u8 hdr_split_id = split_id;
2748 
2749 	/* In PORT_PF split type, print a port split header */
2750 	if (split_type == SPLIT_TYPE_PORT_PF) {
2751 		hdr_split_type = SPLIT_TYPE_PORT;
2752 		hdr_split_id = split_id / dev_data->num_pfs_per_port;
2753 	}
2754 
2755 	/* Calculate register dump header size (and skip it for now) */
2756 	offset = qed_grc_dump_regs_hdr(dump_buf,
2757 				       false,
2758 				       0,
2759 				       hdr_split_type,
2760 				       hdr_split_id, param_name, param_val);
2761 
2762 	/* Dump registers */
2763 	offset += qed_grc_dump_regs_entries(p_hwfn,
2764 					    p_ptt,
2765 					    input_regs_arr,
2766 					    dump_buf + offset,
2767 					    dump,
2768 					    split_type,
2769 					    split_id,
2770 					    block_enable,
2771 					    &num_dumped_reg_entries);
2772 
2773 	/* Write register dump header */
2774 	if (dump && num_dumped_reg_entries > 0)
2775 		qed_grc_dump_regs_hdr(dump_buf,
2776 				      dump,
2777 				      num_dumped_reg_entries,
2778 				      hdr_split_type,
2779 				      hdr_split_id, param_name, param_val);
2780 
2781 	return num_dumped_reg_entries > 0 ? offset : 0;
2782 }
2783 
2784 /* Dumps registers according to the input registers array. Returns the dumped
2785  * size in dwords.
2786  */
2787 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2788 				  struct qed_ptt *p_ptt,
2789 				  u32 *dump_buf,
2790 				  bool dump,
2791 				  bool block_enable[MAX_BLOCK_ID],
2792 				  const char *param_name, const char *param_val)
2793 {
2794 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2795 	u32 offset = 0, input_offset = 0;
2796 	u16 fid;
2797 	while (input_offset <
2798 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2799 		const struct dbg_dump_split_hdr *split_hdr;
2800 		struct dbg_array curr_input_regs_arr;
2801 		enum init_split_types split_type;
2802 		u16 split_count = 0;
2803 		u32 split_data_size;
2804 		u8 split_id;
2805 
2806 		split_hdr =
2807 			(const struct dbg_dump_split_hdr *)
2808 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2809 		split_type =
2810 			GET_FIELD(split_hdr->hdr,
2811 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2812 		split_data_size =
2813 			GET_FIELD(split_hdr->hdr,
2814 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2815 		curr_input_regs_arr.ptr =
2816 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2817 		curr_input_regs_arr.size_in_dwords = split_data_size;
2818 
2819 		switch (split_type) {
2820 		case SPLIT_TYPE_NONE:
2821 			split_count = 1;
2822 			break;
2823 		case SPLIT_TYPE_PORT:
2824 			split_count = dev_data->num_ports;
2825 			break;
2826 		case SPLIT_TYPE_PF:
2827 		case SPLIT_TYPE_PORT_PF:
2828 			split_count = dev_data->num_ports *
2829 			    dev_data->num_pfs_per_port;
2830 			break;
2831 		case SPLIT_TYPE_VF:
2832 			split_count = dev_data->num_vfs;
2833 			break;
2834 		default:
2835 			return 0;
2836 		}
2837 
2838 		for (split_id = 0; split_id < split_count; split_id++)
2839 			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
2840 							  curr_input_regs_arr,
2841 							  dump_buf + offset,
2842 							  dump, block_enable,
2843 							  split_type,
2844 							  split_id,
2845 							  param_name,
2846 							  param_val);
2847 
2848 		input_offset += split_data_size;
2849 	}
2850 
2851 	/* Cancel pretends (pretend to original PF) */
2852 	if (dump) {
2853 		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2854 		qed_fid_pretend(p_hwfn, p_ptt, fid);
2855 		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
2856 		dev_data->pretend.split_id = 0;
2857 	}
2858 
2859 	return offset;
2860 }
2861 
2862 /* Dump reset registers. Returns the dumped size in dwords. */
2863 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2864 				   struct qed_ptt *p_ptt,
2865 				   u32 *dump_buf, bool dump)
2866 {
2867 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2868 	u32 i, offset = 0, num_regs = 0;
2869 
2870 	/* Calculate header size */
2871 	offset += qed_grc_dump_regs_hdr(dump_buf,
2872 					false, 0,
2873 					SPLIT_TYPE_NONE, 0, NULL, NULL);
2874 
2875 	/* Write reset registers */
2876 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2877 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2878 			continue;
2879 
2880 		offset += qed_grc_dump_reg_entry(p_hwfn,
2881 						 p_ptt,
2882 						 dump_buf + offset,
2883 						 dump,
2884 						 BYTES_TO_DWORDS
2885 						 (s_reset_regs_defs[i].addr), 1,
2886 						 false, SPLIT_TYPE_NONE, 0);
2887 		num_regs++;
2888 	}
2889 
2890 	/* Write header */
2891 	if (dump)
2892 		qed_grc_dump_regs_hdr(dump_buf,
2893 				      true, num_regs, SPLIT_TYPE_NONE,
2894 				      0, NULL, NULL);
2895 
2896 	return offset;
2897 }
2898 
2899 /* Dump registers that are modified during GRC Dump and therefore must be
2900  * dumped first. Returns the dumped size in dwords.
2901  */
2902 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2903 				      struct qed_ptt *p_ptt,
2904 				      u32 *dump_buf, bool dump)
2905 {
2906 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2907 	u32 block_id, offset = 0, num_reg_entries = 0;
2908 	const struct dbg_attn_reg *attn_reg_arr;
2909 	u8 storm_id, reg_idx, num_attn_regs;
2910 
2911 	/* Calculate header size */
2912 	offset += qed_grc_dump_regs_hdr(dump_buf,
2913 					false, 0, SPLIT_TYPE_NONE,
2914 					0, NULL, NULL);
2915 
2916 	/* Write parity registers */
2917 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2918 		if (dev_data->block_in_reset[block_id] && dump)
2919 			continue;
2920 
2921 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2922 						       ATTN_TYPE_PARITY,
2923 						       &num_attn_regs);
2924 
2925 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2926 			const struct dbg_attn_reg *reg_data =
2927 				&attn_reg_arr[reg_idx];
2928 			u16 modes_buf_offset;
2929 			bool eval_mode;
2930 			u32 addr;
2931 
2932 			/* Check mode */
2933 			eval_mode = GET_FIELD(reg_data->mode.data,
2934 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2935 			modes_buf_offset =
2936 				GET_FIELD(reg_data->mode.data,
2937 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2938 			if (eval_mode &&
2939 			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2940 				continue;
2941 
2942 			/* Mode match: read & dump registers */
2943 			addr = reg_data->mask_address;
2944 			offset += qed_grc_dump_reg_entry(p_hwfn,
2945 							 p_ptt,
2946 							 dump_buf + offset,
2947 							 dump,
2948 							 addr,
2949 							 1, false,
2950 							 SPLIT_TYPE_NONE, 0);
2951 			addr = GET_FIELD(reg_data->data,
2952 					 DBG_ATTN_REG_STS_ADDRESS);
2953 			offset += qed_grc_dump_reg_entry(p_hwfn,
2954 							 p_ptt,
2955 							 dump_buf + offset,
2956 							 dump,
2957 							 addr,
2958 							 1, false,
2959 							 SPLIT_TYPE_NONE, 0);
2960 			num_reg_entries += 2;
2961 		}
2962 	}
2963 
2964 	/* Write Storm stall status registers */
2965 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2966 		struct storm_defs *storm = &s_storm_defs[storm_id];
2967 		u32 addr;
2968 
2969 		if (dev_data->block_in_reset[storm->block_id] && dump)
2970 			continue;
2971 
2972 		addr =
2973 		    BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2974 				    SEM_FAST_REG_STALLED);
2975 		offset += qed_grc_dump_reg_entry(p_hwfn,
2976 						 p_ptt,
2977 						 dump_buf + offset,
2978 						 dump,
2979 						 addr,
2980 						 1,
2981 						 false, SPLIT_TYPE_NONE, 0);
2982 		num_reg_entries++;
2983 	}
2984 
2985 	/* Write header */
2986 	if (dump)
2987 		qed_grc_dump_regs_hdr(dump_buf,
2988 				      true,
2989 				      num_reg_entries, SPLIT_TYPE_NONE,
2990 				      0, NULL, NULL);
2991 
2992 	return offset;
2993 }
2994 
2995 /* Dumps registers that can't be represented in the debug arrays */
2996 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2997 				     struct qed_ptt *p_ptt,
2998 				     u32 *dump_buf, bool dump)
2999 {
3000 	u32 offset = 0, addr;
3001 
3002 	offset += qed_grc_dump_regs_hdr(dump_buf,
3003 					dump, 2, SPLIT_TYPE_NONE, 0,
3004 					NULL, NULL);
3005 
3006 	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
3007 	 * skipped).
3008 	 */
3009 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
3010 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
3011 					      p_ptt,
3012 					      dump_buf + offset,
3013 					      dump,
3014 					      addr,
3015 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
3016 					      7,
3017 					      1);
3018 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
3019 	offset +=
3020 	    qed_grc_dump_reg_entry_skip(p_hwfn,
3021 					p_ptt,
3022 					dump_buf + offset,
3023 					dump,
3024 					addr,
3025 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
3026 					7,
3027 					1);
3028 
3029 	return offset;
3030 }
3031 
3032 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3033  * dwords. The following parameters are dumped:
3034  * - name:	   dumped only if it's not NULL.
3035  * - addr:	   in dwords, dumped only if name is NULL.
3036  * - len:	   in dwords, always dumped.
3037  * - width:	   dumped if it's not zero.
3038  * - packed:	   dumped only if it's not false.
3039  * - mem_group:	   always dumped.
3040  * - is_storm:	   true only if the memory is related to a Storm.
3041  * - storm_letter: valid only if is_storm is true.
3042  *
3043  */
3044 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
3045 				u32 *dump_buf,
3046 				bool dump,
3047 				const char *name,
3048 				u32 addr,
3049 				u32 len,
3050 				u32 bit_width,
3051 				bool packed,
3052 				const char *mem_group,
3053 				bool is_storm, char storm_letter)
3054 {
3055 	u8 num_params = 3;
3056 	u32 offset = 0;
3057 	char buf[64];
3058 
3059 	if (!len)
3060 		DP_NOTICE(p_hwfn,
3061 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3062 
3063 	if (bit_width)
3064 		num_params++;
3065 	if (packed)
3066 		num_params++;
3067 
3068 	/* Dump section header */
3069 	offset += qed_dump_section_hdr(dump_buf + offset,
3070 				       dump, "grc_mem", num_params);
3071 
3072 	if (name) {
3073 		/* Dump name */
3074 		if (is_storm) {
3075 			strcpy(buf, "?STORM_");
3076 			buf[0] = storm_letter;
3077 			strcpy(buf + strlen(buf), name);
3078 		} else {
3079 			strcpy(buf, name);
3080 		}
3081 
3082 		offset += qed_dump_str_param(dump_buf + offset,
3083 					     dump, "name", buf);
3084 	} else {
3085 		/* Dump address */
3086 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3087 
3088 		offset += qed_dump_num_param(dump_buf + offset,
3089 					     dump, "addr", addr_in_bytes);
3090 	}
3091 
3092 	/* Dump len */
3093 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
3094 
3095 	/* Dump bit width */
3096 	if (bit_width)
3097 		offset += qed_dump_num_param(dump_buf + offset,
3098 					     dump, "width", bit_width);
3099 
3100 	/* Dump packed */
3101 	if (packed)
3102 		offset += qed_dump_num_param(dump_buf + offset,
3103 					     dump, "packed", 1);
3104 
3105 	/* Dump reg type */
3106 	if (is_storm) {
3107 		strcpy(buf, "?STORM_");
3108 		buf[0] = storm_letter;
3109 		strcpy(buf + strlen(buf), mem_group);
3110 	} else {
3111 		strcpy(buf, mem_group);
3112 	}
3113 
3114 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3115 
3116 	return offset;
3117 }
3118 
3119 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
3120  * Returns the dumped size in dwords.
3121  * The addr and len arguments are specified in dwords.
3122  */
3123 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3124 			    struct qed_ptt *p_ptt,
3125 			    u32 *dump_buf,
3126 			    bool dump,
3127 			    const char *name,
3128 			    u32 addr,
3129 			    u32 len,
3130 			    bool wide_bus,
3131 			    u32 bit_width,
3132 			    bool packed,
3133 			    const char *mem_group,
3134 			    bool is_storm, char storm_letter)
3135 {
3136 	u32 offset = 0;
3137 
3138 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3139 				       dump_buf + offset,
3140 				       dump,
3141 				       name,
3142 				       addr,
3143 				       len,
3144 				       bit_width,
3145 				       packed,
3146 				       mem_group, is_storm, storm_letter);
3147 	offset += qed_grc_dump_addr_range(p_hwfn,
3148 					  p_ptt,
3149 					  dump_buf + offset,
3150 					  dump, addr, len, wide_bus,
3151 					  SPLIT_TYPE_NONE, 0);
3152 
3153 	return offset;
3154 }
3155 
3156 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3157 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3158 				    struct qed_ptt *p_ptt,
3159 				    struct dbg_array input_mems_arr,
3160 				    u32 *dump_buf, bool dump)
3161 {
3162 	u32 i, offset = 0, input_offset = 0;
3163 	bool mode_match = true;
3164 
3165 	while (input_offset < input_mems_arr.size_in_dwords) {
3166 		const struct dbg_dump_cond_hdr *cond_hdr;
3167 		u16 modes_buf_offset;
3168 		u32 num_entries;
3169 		bool eval_mode;
3170 
3171 		cond_hdr = (const struct dbg_dump_cond_hdr *)
3172 			   &input_mems_arr.ptr[input_offset++];
3173 		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3174 
3175 		/* Check required mode */
3176 		eval_mode = GET_FIELD(cond_hdr->mode.data,
3177 				      DBG_MODE_HDR_EVAL_MODE) > 0;
3178 		if (eval_mode) {
3179 			modes_buf_offset =
3180 				GET_FIELD(cond_hdr->mode.data,
3181 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
3182 			mode_match = qed_is_mode_match(p_hwfn,
3183 						       &modes_buf_offset);
3184 		}
3185 
3186 		if (!mode_match) {
3187 			input_offset += cond_hdr->data_size;
3188 			continue;
3189 		}
3190 
3191 		for (i = 0; i < num_entries;
3192 		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3193 			const struct dbg_dump_mem *mem =
3194 				(const struct dbg_dump_mem *)
3195 				&input_mems_arr.ptr[input_offset];
3196 			u8 mem_group_id = GET_FIELD(mem->dword0,
3197 						    DBG_DUMP_MEM_MEM_GROUP_ID);
3198 			bool is_storm = false, mem_wide_bus;
3199 			enum dbg_grc_params grc_param;
3200 			char storm_letter = 'a';
3201 			enum block_id block_id;
3202 			u32 mem_addr, mem_len;
3203 
3204 			if (mem_group_id >= MEM_GROUPS_NUM) {
3205 				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3206 				return 0;
3207 			}
3208 
3209 			block_id = (enum block_id)cond_hdr->block_id;
3210 			if (!qed_grc_is_mem_included(p_hwfn,
3211 						     block_id,
3212 						     mem_group_id))
3213 				continue;
3214 
3215 			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3216 			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3217 			mem_wide_bus = GET_FIELD(mem->dword1,
3218 						 DBG_DUMP_MEM_WIDE_BUS);
3219 
3220 			/* Update memory length for CCFC/TCFC memories
3221 			 * according to number of LCIDs/LTIDs.
3222 			 */
3223 			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3224 				if (mem_len % MAX_LCIDS) {
3225 					DP_NOTICE(p_hwfn,
3226 						  "Invalid CCFC connection memory size\n");
3227 					return 0;
3228 				}
3229 
3230 				grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3231 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3232 					  (mem_len / MAX_LCIDS);
3233 			} else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3234 				if (mem_len % MAX_LTIDS) {
3235 					DP_NOTICE(p_hwfn,
3236 						  "Invalid TCFC task memory size\n");
3237 					return 0;
3238 				}
3239 
3240 				grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3241 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3242 					  (mem_len / MAX_LTIDS);
3243 			}
3244 
3245 			/* If memory is associated with Storm, update Storm
3246 			 * details.
3247 			 */
3248 			if (s_block_defs
3249 			    [cond_hdr->block_id]->associated_to_storm) {
3250 				is_storm = true;
3251 				storm_letter =
3252 				    s_storm_defs[s_block_defs
3253 						 [cond_hdr->block_id]->
3254 						 storm_id].letter;
3255 			}
3256 
3257 			/* Dump memory */
3258 			offset += qed_grc_dump_mem(p_hwfn,
3259 						p_ptt,
3260 						dump_buf + offset,
3261 						dump,
3262 						NULL,
3263 						mem_addr,
3264 						mem_len,
3265 						mem_wide_bus,
3266 						0,
3267 						false,
3268 						s_mem_group_names[mem_group_id],
3269 						is_storm,
3270 						storm_letter);
3271 		}
3272 	}
3273 
3274 	return offset;
3275 }
3276 
3277 /* Dumps GRC memories according to the input array dump_mem.
3278  * Returns the dumped size in dwords.
3279  */
3280 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3281 				 struct qed_ptt *p_ptt,
3282 				 u32 *dump_buf, bool dump)
3283 {
3284 	u32 offset = 0, input_offset = 0;
3285 
3286 	while (input_offset <
3287 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3288 		const struct dbg_dump_split_hdr *split_hdr;
3289 		struct dbg_array curr_input_mems_arr;
3290 		enum init_split_types split_type;
3291 		u32 split_data_size;
3292 
3293 		split_hdr = (const struct dbg_dump_split_hdr *)
3294 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3295 		split_type =
3296 			GET_FIELD(split_hdr->hdr,
3297 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3298 		split_data_size =
3299 			GET_FIELD(split_hdr->hdr,
3300 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3301 		curr_input_mems_arr.ptr =
3302 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3303 		curr_input_mems_arr.size_in_dwords = split_data_size;
3304 
3305 		if (split_type == SPLIT_TYPE_NONE)
3306 			offset += qed_grc_dump_mem_entries(p_hwfn,
3307 							   p_ptt,
3308 							   curr_input_mems_arr,
3309 							   dump_buf + offset,
3310 							   dump);
3311 		else
3312 			DP_NOTICE(p_hwfn,
3313 				  "Dumping split memories is currently not supported\n");
3314 
3315 		input_offset += split_data_size;
3316 	}
3317 
3318 	return offset;
3319 }
3320 
3321 /* Dumps GRC context data for the specified Storm.
3322  * Returns the dumped size in dwords.
3323  * The lid_size argument is specified in quad-regs.
3324  */
3325 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3326 				 struct qed_ptt *p_ptt,
3327 				 u32 *dump_buf,
3328 				 bool dump,
3329 				 const char *name,
3330 				 u32 num_lids,
3331 				 u32 lid_size,
3332 				 u32 rd_reg_addr,
3333 				 u8 storm_id)
3334 {
3335 	struct storm_defs *storm = &s_storm_defs[storm_id];
3336 	u32 i, lid, total_size, offset = 0;
3337 
3338 	if (!lid_size)
3339 		return 0;
3340 
3341 	lid_size *= BYTES_IN_DWORD;
3342 	total_size = num_lids * lid_size;
3343 
3344 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3345 				       dump_buf + offset,
3346 				       dump,
3347 				       name,
3348 				       0,
3349 				       total_size,
3350 				       lid_size * 32,
3351 				       false, name, true, storm->letter);
3352 
3353 	if (!dump)
3354 		return offset + total_size;
3355 
3356 	/* Dump context data */
3357 	for (lid = 0; lid < num_lids; lid++) {
3358 		for (i = 0; i < lid_size; i++, offset++) {
3359 			qed_wr(p_hwfn,
3360 			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3361 			*(dump_buf + offset) = qed_rd(p_hwfn,
3362 						      p_ptt, rd_reg_addr);
3363 		}
3364 	}
3365 
3366 	return offset;
3367 }
3368 
3369 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3370 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3371 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3372 {
3373 	enum dbg_grc_params grc_param;
3374 	u32 offset = 0;
3375 	u8 storm_id;
3376 
3377 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3378 		struct storm_defs *storm = &s_storm_defs[storm_id];
3379 
3380 		if (!qed_grc_is_storm_included(p_hwfn,
3381 					       (enum dbg_storms)storm_id))
3382 			continue;
3383 
3384 		/* Dump Conn AG context size */
3385 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3386 		offset +=
3387 			qed_grc_dump_ctx_data(p_hwfn,
3388 					      p_ptt,
3389 					      dump_buf + offset,
3390 					      dump,
3391 					      "CONN_AG_CTX",
3392 					      qed_grc_get_param(p_hwfn,
3393 								grc_param),
3394 					      storm->cm_conn_ag_ctx_lid_size,
3395 					      storm->cm_conn_ag_ctx_rd_addr,
3396 					      storm_id);
3397 
3398 		/* Dump Conn ST context size */
3399 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3400 		offset +=
3401 			qed_grc_dump_ctx_data(p_hwfn,
3402 					      p_ptt,
3403 					      dump_buf + offset,
3404 					      dump,
3405 					      "CONN_ST_CTX",
3406 					      qed_grc_get_param(p_hwfn,
3407 								grc_param),
3408 					      storm->cm_conn_st_ctx_lid_size,
3409 					      storm->cm_conn_st_ctx_rd_addr,
3410 					      storm_id);
3411 
3412 		/* Dump Task AG context size */
3413 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3414 		offset +=
3415 			qed_grc_dump_ctx_data(p_hwfn,
3416 					      p_ptt,
3417 					      dump_buf + offset,
3418 					      dump,
3419 					      "TASK_AG_CTX",
3420 					      qed_grc_get_param(p_hwfn,
3421 								grc_param),
3422 					      storm->cm_task_ag_ctx_lid_size,
3423 					      storm->cm_task_ag_ctx_rd_addr,
3424 					      storm_id);
3425 
3426 		/* Dump Task ST context size */
3427 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3428 		offset +=
3429 			qed_grc_dump_ctx_data(p_hwfn,
3430 					      p_ptt,
3431 					      dump_buf + offset,
3432 					      dump,
3433 					      "TASK_ST_CTX",
3434 					      qed_grc_get_param(p_hwfn,
3435 								grc_param),
3436 					      storm->cm_task_st_ctx_lid_size,
3437 					      storm->cm_task_st_ctx_rd_addr,
3438 					      storm_id);
3439 	}
3440 
3441 	return offset;
3442 }
3443 
3444 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3445 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3446 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3447 {
3448 	char buf[10] = "IOR_SET_?";
3449 	u32 addr, offset = 0;
3450 	u8 storm_id, set_id;
3451 
3452 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3453 		struct storm_defs *storm = &s_storm_defs[storm_id];
3454 
3455 		if (!qed_grc_is_storm_included(p_hwfn,
3456 					       (enum dbg_storms)storm_id))
3457 			continue;
3458 
3459 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3460 			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3461 					       SEM_FAST_REG_STORM_REG_FILE) +
3462 			       IOR_SET_OFFSET(set_id);
3463 			if (strlen(buf) > 0)
3464 				buf[strlen(buf) - 1] = '0' + set_id;
3465 			offset += qed_grc_dump_mem(p_hwfn,
3466 						   p_ptt,
3467 						   dump_buf + offset,
3468 						   dump,
3469 						   buf,
3470 						   addr,
3471 						   IORS_PER_SET,
3472 						   false,
3473 						   32,
3474 						   false,
3475 						   "ior",
3476 						   true,
3477 						   storm->letter);
3478 		}
3479 	}
3480 
3481 	return offset;
3482 }
3483 
3484 /* Dump VFC CAM. Returns the dumped size in dwords. */
3485 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3486 				struct qed_ptt *p_ptt,
3487 				u32 *dump_buf, bool dump, u8 storm_id)
3488 {
3489 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3490 	struct storm_defs *storm = &s_storm_defs[storm_id];
3491 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3492 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3493 	u32 row, i, offset = 0;
3494 
3495 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3496 				       dump_buf + offset,
3497 				       dump,
3498 				       "vfc_cam",
3499 				       0,
3500 				       total_size,
3501 				       256,
3502 				       false, "vfc_cam", true, storm->letter);
3503 
3504 	if (!dump)
3505 		return offset + total_size;
3506 
3507 	/* Prepare CAM address */
3508 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3509 
3510 	for (row = 0; row < VFC_CAM_NUM_ROWS;
3511 	     row++, offset += VFC_CAM_RESP_DWORDS) {
3512 		/* Write VFC CAM command */
3513 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3514 		ARR_REG_WR(p_hwfn,
3515 			   p_ptt,
3516 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3517 			   cam_cmd, VFC_CAM_CMD_DWORDS);
3518 
3519 		/* Write VFC CAM address */
3520 		ARR_REG_WR(p_hwfn,
3521 			   p_ptt,
3522 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3523 			   cam_addr, VFC_CAM_ADDR_DWORDS);
3524 
3525 		/* Read VFC CAM read response */
3526 		ARR_REG_RD(p_hwfn,
3527 			   p_ptt,
3528 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3529 			   dump_buf + offset, VFC_CAM_RESP_DWORDS);
3530 	}
3531 
3532 	return offset;
3533 }
3534 
3535 /* Dump VFC RAM. Returns the dumped size in dwords. */
3536 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3537 				struct qed_ptt *p_ptt,
3538 				u32 *dump_buf,
3539 				bool dump,
3540 				u8 storm_id, struct vfc_ram_defs *ram_defs)
3541 {
3542 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3543 	struct storm_defs *storm = &s_storm_defs[storm_id];
3544 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3545 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3546 	u32 row, i, offset = 0;
3547 
3548 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3549 				       dump_buf + offset,
3550 				       dump,
3551 				       ram_defs->mem_name,
3552 				       0,
3553 				       total_size,
3554 				       256,
3555 				       false,
3556 				       ram_defs->type_name,
3557 				       true, storm->letter);
3558 
3559 	/* Prepare RAM address */
3560 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3561 
3562 	if (!dump)
3563 		return offset + total_size;
3564 
3565 	for (row = ram_defs->base_row;
3566 	     row < ram_defs->base_row + ram_defs->num_rows;
3567 	     row++, offset += VFC_RAM_RESP_DWORDS) {
3568 		/* Write VFC RAM command */
3569 		ARR_REG_WR(p_hwfn,
3570 			   p_ptt,
3571 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3572 			   ram_cmd, VFC_RAM_CMD_DWORDS);
3573 
3574 		/* Write VFC RAM address */
3575 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3576 		ARR_REG_WR(p_hwfn,
3577 			   p_ptt,
3578 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3579 			   ram_addr, VFC_RAM_ADDR_DWORDS);
3580 
3581 		/* Read VFC RAM read response */
3582 		ARR_REG_RD(p_hwfn,
3583 			   p_ptt,
3584 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3585 			   dump_buf + offset, VFC_RAM_RESP_DWORDS);
3586 	}
3587 
3588 	return offset;
3589 }
3590 
3591 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3592 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3593 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3594 {
3595 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3596 	u8 storm_id, i;
3597 	u32 offset = 0;
3598 
3599 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3600 		if (!qed_grc_is_storm_included(p_hwfn,
3601 					       (enum dbg_storms)storm_id) ||
3602 		    !s_storm_defs[storm_id].has_vfc ||
3603 		    (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3604 		     PLATFORM_ASIC))
3605 			continue;
3606 
3607 		/* Read CAM */
3608 		offset += qed_grc_dump_vfc_cam(p_hwfn,
3609 					       p_ptt,
3610 					       dump_buf + offset,
3611 					       dump, storm_id);
3612 
3613 		/* Read RAM */
3614 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3615 			offset += qed_grc_dump_vfc_ram(p_hwfn,
3616 						       p_ptt,
3617 						       dump_buf + offset,
3618 						       dump,
3619 						       storm_id,
3620 						       &s_vfc_ram_defs[i]);
3621 	}
3622 
3623 	return offset;
3624 }
3625 
3626 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3627 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3628 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3629 {
3630 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3631 	u32 offset = 0;
3632 	u8 rss_mem_id;
3633 
3634 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3635 		u32 rss_addr, num_entries, total_dwords;
3636 		struct rss_mem_defs *rss_defs;
3637 		u32 addr, num_dwords_to_read;
3638 		bool packed;
3639 
3640 		rss_defs = &s_rss_mem_defs[rss_mem_id];
3641 		rss_addr = rss_defs->addr;
3642 		num_entries = rss_defs->num_entries[dev_data->chip_id];
3643 		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3644 		packed = (rss_defs->entry_width == 16);
3645 
3646 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3647 					       dump_buf + offset,
3648 					       dump,
3649 					       rss_defs->mem_name,
3650 					       0,
3651 					       total_dwords,
3652 					       rss_defs->entry_width,
3653 					       packed,
3654 					       rss_defs->type_name, false, 0);
3655 
3656 		/* Dump RSS data */
3657 		if (!dump) {
3658 			offset += total_dwords;
3659 			continue;
3660 		}
3661 
3662 		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3663 		while (total_dwords) {
3664 			num_dwords_to_read = min_t(u32,
3665 						   RSS_REG_RSS_RAM_DATA_SIZE,
3666 						   total_dwords);
3667 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3668 			offset += qed_grc_dump_addr_range(p_hwfn,
3669 							  p_ptt,
3670 							  dump_buf + offset,
3671 							  dump,
3672 							  addr,
3673 							  num_dwords_to_read,
3674 							  false,
3675 							  SPLIT_TYPE_NONE, 0);
3676 			total_dwords -= num_dwords_to_read;
3677 			rss_addr++;
3678 		}
3679 	}
3680 
3681 	return offset;
3682 }
3683 
3684 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3685 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3686 				struct qed_ptt *p_ptt,
3687 				u32 *dump_buf, bool dump, u8 big_ram_id)
3688 {
3689 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3690 	u32 block_size, ram_size, offset = 0, reg_val, i;
3691 	char mem_name[12] = "???_BIG_RAM";
3692 	char type_name[8] = "???_RAM";
3693 	struct big_ram_defs *big_ram;
3694 
3695 	big_ram = &s_big_ram_defs[big_ram_id];
3696 	ram_size = big_ram->ram_size[dev_data->chip_id];
3697 
3698 	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3699 	block_size = reg_val &
3700 		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3701 									 : 128;
3702 
3703 	strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3704 	strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3705 
3706 	/* Dump memory header */
3707 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3708 				       dump_buf + offset,
3709 				       dump,
3710 				       mem_name,
3711 				       0,
3712 				       ram_size,
3713 				       block_size * 8,
3714 				       false, type_name, false, 0);
3715 
3716 	/* Read and dump Big RAM data */
3717 	if (!dump)
3718 		return offset + ram_size;
3719 
3720 	/* Dump Big RAM */
3721 	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3722 	     i++) {
3723 		u32 addr, len;
3724 
3725 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3726 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3727 		len = BRB_REG_BIG_RAM_DATA_SIZE;
3728 		offset += qed_grc_dump_addr_range(p_hwfn,
3729 						  p_ptt,
3730 						  dump_buf + offset,
3731 						  dump,
3732 						  addr,
3733 						  len,
3734 						  false, SPLIT_TYPE_NONE, 0);
3735 	}
3736 
3737 	return offset;
3738 }
3739 
3740 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3741 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3742 {
3743 	bool block_enable[MAX_BLOCK_ID] = { 0 };
3744 	u32 offset = 0, addr;
3745 	bool halted = false;
3746 
3747 	/* Halt MCP */
3748 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3749 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3750 		if (!halted)
3751 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3752 	}
3753 
3754 	/* Dump MCP scratchpad */
3755 	offset += qed_grc_dump_mem(p_hwfn,
3756 				   p_ptt,
3757 				   dump_buf + offset,
3758 				   dump,
3759 				   NULL,
3760 				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3761 				   MCP_REG_SCRATCH_SIZE_BB_K2,
3762 				   false, 0, false, "MCP", false, 0);
3763 
3764 	/* Dump MCP cpu_reg_file */
3765 	offset += qed_grc_dump_mem(p_hwfn,
3766 				   p_ptt,
3767 				   dump_buf + offset,
3768 				   dump,
3769 				   NULL,
3770 				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3771 				   MCP_REG_CPU_REG_FILE_SIZE,
3772 				   false, 0, false, "MCP", false, 0);
3773 
3774 	/* Dump MCP registers */
3775 	block_enable[BLOCK_MCP] = true;
3776 	offset += qed_grc_dump_registers(p_hwfn,
3777 					 p_ptt,
3778 					 dump_buf + offset,
3779 					 dump, block_enable, "block", "MCP");
3780 
3781 	/* Dump required non-MCP registers */
3782 	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3783 					dump, 1, SPLIT_TYPE_NONE, 0,
3784 					"block", "MCP");
3785 	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3786 	offset += qed_grc_dump_reg_entry(p_hwfn,
3787 					 p_ptt,
3788 					 dump_buf + offset,
3789 					 dump,
3790 					 addr,
3791 					 1,
3792 					 false, SPLIT_TYPE_NONE, 0);
3793 
3794 	/* Release MCP */
3795 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3796 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3797 
3798 	return offset;
3799 }
3800 
3801 /* Dumps the tbus indirect memory for all PHYs. */
3802 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3803 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3804 {
3805 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3806 	char mem_name[32];
3807 	u8 phy_id;
3808 
3809 	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3810 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3811 		struct phy_defs *phy_defs;
3812 		u8 *bytes_buf;
3813 
3814 		phy_defs = &s_phy_defs[phy_id];
3815 		addr_lo_addr = phy_defs->base_addr +
3816 			       phy_defs->tbus_addr_lo_addr;
3817 		addr_hi_addr = phy_defs->base_addr +
3818 			       phy_defs->tbus_addr_hi_addr;
3819 		data_lo_addr = phy_defs->base_addr +
3820 			       phy_defs->tbus_data_lo_addr;
3821 		data_hi_addr = phy_defs->base_addr +
3822 			       phy_defs->tbus_data_hi_addr;
3823 
3824 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3825 			     phy_defs->phy_name) < 0)
3826 			DP_NOTICE(p_hwfn,
3827 				  "Unexpected debug error: invalid PHY memory name\n");
3828 
3829 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3830 					       dump_buf + offset,
3831 					       dump,
3832 					       mem_name,
3833 					       0,
3834 					       PHY_DUMP_SIZE_DWORDS,
3835 					       16, true, mem_name, false, 0);
3836 
3837 		if (!dump) {
3838 			offset += PHY_DUMP_SIZE_DWORDS;
3839 			continue;
3840 		}
3841 
3842 		bytes_buf = (u8 *)(dump_buf + offset);
3843 		for (tbus_hi_offset = 0;
3844 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3845 		     tbus_hi_offset++) {
3846 			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3847 			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3848 			     tbus_lo_offset++) {
3849 				qed_wr(p_hwfn,
3850 				       p_ptt, addr_lo_addr, tbus_lo_offset);
3851 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3852 							    p_ptt,
3853 							    data_lo_addr);
3854 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3855 							    p_ptt,
3856 							    data_hi_addr);
3857 			}
3858 		}
3859 
3860 		offset += PHY_DUMP_SIZE_DWORDS;
3861 	}
3862 
3863 	return offset;
3864 }
3865 
3866 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3867 				struct qed_ptt *p_ptt,
3868 				enum block_id block_id,
3869 				u8 line_id,
3870 				u8 enable_mask,
3871 				u8 right_shift,
3872 				u8 force_valid_mask, u8 force_frame_mask)
3873 {
3874 	struct block_defs *block = s_block_defs[block_id];
3875 
3876 	qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3877 	qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3878 	qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3879 	qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3880 	qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3881 }
3882 
3883 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3884 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3885 				     struct qed_ptt *p_ptt,
3886 				     u32 *dump_buf, bool dump)
3887 {
3888 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3889 	u32 block_id, line_id, offset = 0;
3890 
3891 	/* Don't dump static debug if a debug bus recording is in progress */
3892 	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3893 		return 0;
3894 
3895 	if (dump) {
3896 		/* Disable all blocks debug output */
3897 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3898 			struct block_defs *block = s_block_defs[block_id];
3899 
3900 			if (block->dbg_client_id[dev_data->chip_id] !=
3901 			    MAX_DBG_BUS_CLIENTS)
3902 				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
3903 				       0);
3904 		}
3905 
3906 		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3907 		qed_bus_set_framing_mode(p_hwfn,
3908 					 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3909 		qed_wr(p_hwfn,
3910 		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3911 		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3912 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3913 	}
3914 
3915 	/* Dump all static debug lines for each relevant block */
3916 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3917 		struct block_defs *block = s_block_defs[block_id];
3918 		struct dbg_bus_block *block_desc;
3919 		u32 block_dwords, addr, len;
3920 		u8 dbg_client_id;
3921 
3922 		if (block->dbg_client_id[dev_data->chip_id] ==
3923 		    MAX_DBG_BUS_CLIENTS)
3924 			continue;
3925 
3926 		block_desc = get_dbg_bus_block_desc(p_hwfn,
3927 						    (enum block_id)block_id);
3928 		block_dwords = NUM_DBG_LINES(block_desc) *
3929 			       STATIC_DEBUG_LINE_DWORDS;
3930 
3931 		/* Dump static section params */
3932 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3933 					       dump_buf + offset,
3934 					       dump,
3935 					       block->name,
3936 					       0,
3937 					       block_dwords,
3938 					       32, false, "STATIC", false, 0);
3939 
3940 		if (!dump) {
3941 			offset += block_dwords;
3942 			continue;
3943 		}
3944 
3945 		/* If all lines are invalid - dump zeros */
3946 		if (dev_data->block_in_reset[block_id]) {
3947 			memset(dump_buf + offset, 0,
3948 			       DWORDS_TO_BYTES(block_dwords));
3949 			offset += block_dwords;
3950 			continue;
3951 		}
3952 
3953 		/* Enable block's client */
3954 		dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3955 		qed_bus_enable_clients(p_hwfn,
3956 				       p_ptt,
3957 				       BIT(dbg_client_id));
3958 
3959 		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3960 		len = STATIC_DEBUG_LINE_DWORDS;
3961 		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3962 		     line_id++) {
3963 			/* Configure debug line ID */
3964 			qed_config_dbg_line(p_hwfn,
3965 					    p_ptt,
3966 					    (enum block_id)block_id,
3967 					    (u8)line_id, 0xf, 0, 0, 0);
3968 
3969 			/* Read debug line info */
3970 			offset += qed_grc_dump_addr_range(p_hwfn,
3971 							  p_ptt,
3972 							  dump_buf + offset,
3973 							  dump,
3974 							  addr,
3975 							  len,
3976 							  true, SPLIT_TYPE_NONE,
3977 							  0);
3978 		}
3979 
3980 		/* Disable block's client and debug output */
3981 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3982 		qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3983 	}
3984 
3985 	if (dump) {
3986 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3987 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3988 	}
3989 
3990 	return offset;
3991 }
3992 
3993 /* Performs GRC Dump to the specified buffer.
3994  * Returns the dumped size in dwords.
3995  */
3996 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3997 				    struct qed_ptt *p_ptt,
3998 				    u32 *dump_buf,
3999 				    bool dump, u32 *num_dumped_dwords)
4000 {
4001 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4002 	bool parities_masked = false;
4003 	u32 offset = 0;
4004 	u8 i;
4005 
4006 	*num_dumped_dwords = 0;
4007 	dev_data->num_regs_read = 0;
4008 
4009 	/* Update reset state */
4010 	if (dump)
4011 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4012 
4013 	/* Dump global params */
4014 	offset += qed_dump_common_global_params(p_hwfn,
4015 						p_ptt,
4016 						dump_buf + offset, dump, 4);
4017 	offset += qed_dump_str_param(dump_buf + offset,
4018 				     dump, "dump-type", "grc-dump");
4019 	offset += qed_dump_num_param(dump_buf + offset,
4020 				     dump,
4021 				     "num-lcids",
4022 				     qed_grc_get_param(p_hwfn,
4023 						DBG_GRC_PARAM_NUM_LCIDS));
4024 	offset += qed_dump_num_param(dump_buf + offset,
4025 				     dump,
4026 				     "num-ltids",
4027 				     qed_grc_get_param(p_hwfn,
4028 						DBG_GRC_PARAM_NUM_LTIDS));
4029 	offset += qed_dump_num_param(dump_buf + offset,
4030 				     dump, "num-ports", dev_data->num_ports);
4031 
4032 	/* Dump reset registers (dumped before taking blocks out of reset ) */
4033 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4034 		offset += qed_grc_dump_reset_regs(p_hwfn,
4035 						  p_ptt,
4036 						  dump_buf + offset, dump);
4037 
4038 	/* Take all blocks out of reset (using reset registers) */
4039 	if (dump) {
4040 		qed_grc_unreset_blocks(p_hwfn, p_ptt);
4041 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4042 	}
4043 
4044 	/* Disable all parities using MFW command */
4045 	if (dump &&
4046 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4047 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
4048 		if (!parities_masked) {
4049 			DP_NOTICE(p_hwfn,
4050 				  "Failed to mask parities using MFW\n");
4051 			if (qed_grc_get_param
4052 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4053 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4054 		}
4055 	}
4056 
4057 	/* Dump modified registers (dumped before modifying them) */
4058 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4059 		offset += qed_grc_dump_modified_regs(p_hwfn,
4060 						     p_ptt,
4061 						     dump_buf + offset, dump);
4062 
4063 	/* Stall storms */
4064 	if (dump &&
4065 	    (qed_grc_is_included(p_hwfn,
4066 				 DBG_GRC_PARAM_DUMP_IOR) ||
4067 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4068 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
4069 
4070 	/* Dump all regs  */
4071 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4072 		bool block_enable[MAX_BLOCK_ID];
4073 
4074 		/* Dump all blocks except MCP */
4075 		for (i = 0; i < MAX_BLOCK_ID; i++)
4076 			block_enable[i] = true;
4077 		block_enable[BLOCK_MCP] = false;
4078 		offset += qed_grc_dump_registers(p_hwfn,
4079 						 p_ptt,
4080 						 dump_buf +
4081 						 offset,
4082 						 dump,
4083 						 block_enable, NULL, NULL);
4084 
4085 		/* Dump special registers */
4086 		offset += qed_grc_dump_special_regs(p_hwfn,
4087 						    p_ptt,
4088 						    dump_buf + offset, dump);
4089 	}
4090 
4091 	/* Dump memories */
4092 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4093 
4094 	/* Dump MCP */
4095 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4096 		offset += qed_grc_dump_mcp(p_hwfn,
4097 					   p_ptt, dump_buf + offset, dump);
4098 
4099 	/* Dump context */
4100 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4101 		offset += qed_grc_dump_ctx(p_hwfn,
4102 					   p_ptt, dump_buf + offset, dump);
4103 
4104 	/* Dump RSS memories */
4105 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4106 		offset += qed_grc_dump_rss(p_hwfn,
4107 					   p_ptt, dump_buf + offset, dump);
4108 
4109 	/* Dump Big RAM */
4110 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4111 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4112 			offset += qed_grc_dump_big_ram(p_hwfn,
4113 						       p_ptt,
4114 						       dump_buf + offset,
4115 						       dump, i);
4116 
4117 	/* Dump IORs */
4118 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4119 		offset += qed_grc_dump_iors(p_hwfn,
4120 					    p_ptt, dump_buf + offset, dump);
4121 
4122 	/* Dump VFC */
4123 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4124 		offset += qed_grc_dump_vfc(p_hwfn,
4125 					   p_ptt, dump_buf + offset, dump);
4126 
4127 	/* Dump PHY tbus */
4128 	if (qed_grc_is_included(p_hwfn,
4129 				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
4130 	    CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4131 		offset += qed_grc_dump_phy(p_hwfn,
4132 					   p_ptt, dump_buf + offset, dump);
4133 
4134 	/* Dump static debug data (only if not during debug bus recording) */
4135 	if (qed_grc_is_included(p_hwfn,
4136 				DBG_GRC_PARAM_DUMP_STATIC) &&
4137 	    (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
4138 		offset += qed_grc_dump_static_debug(p_hwfn,
4139 						    p_ptt,
4140 						    dump_buf + offset, dump);
4141 
4142 	/* Dump last section */
4143 	offset += qed_dump_last_section(dump_buf, offset, dump);
4144 
4145 	if (dump) {
4146 		/* Unstall storms */
4147 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4148 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
4149 
4150 		/* Clear parity status */
4151 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
4152 
4153 		/* Enable all parities using MFW command */
4154 		if (parities_masked)
4155 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4156 	}
4157 
4158 	*num_dumped_dwords = offset;
4159 
4160 	return DBG_STATUS_OK;
4161 }
4162 
4163 /* Writes the specified failing Idle Check rule to the specified buffer.
4164  * Returns the dumped size in dwords.
4165  */
4166 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4167 				     struct qed_ptt *p_ptt,
4168 				     u32 *
4169 				     dump_buf,
4170 				     bool dump,
4171 				     u16 rule_id,
4172 				     const struct dbg_idle_chk_rule *rule,
4173 				     u16 fail_entry_id, u32 *cond_reg_values)
4174 {
4175 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4176 	const struct dbg_idle_chk_cond_reg *cond_regs;
4177 	const struct dbg_idle_chk_info_reg *info_regs;
4178 	u32 i, next_reg_offset = 0, offset = 0;
4179 	struct dbg_idle_chk_result_hdr *hdr;
4180 	const union dbg_idle_chk_reg *regs;
4181 	u8 reg_id;
4182 
4183 	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4184 	regs = &((const union dbg_idle_chk_reg *)
4185 		 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4186 	cond_regs = &regs[0].cond_reg;
4187 	info_regs = &regs[rule->num_cond_regs].info_reg;
4188 
4189 	/* Dump rule data */
4190 	if (dump) {
4191 		memset(hdr, 0, sizeof(*hdr));
4192 		hdr->rule_id = rule_id;
4193 		hdr->mem_entry_id = fail_entry_id;
4194 		hdr->severity = rule->severity;
4195 		hdr->num_dumped_cond_regs = rule->num_cond_regs;
4196 	}
4197 
4198 	offset += IDLE_CHK_RESULT_HDR_DWORDS;
4199 
4200 	/* Dump condition register values */
4201 	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4202 		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4203 		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4204 
4205 		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4206 			  (dump_buf + offset);
4207 
4208 		/* Write register header */
4209 		if (!dump) {
4210 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4211 			    reg->entry_size;
4212 			continue;
4213 		}
4214 
4215 		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4216 		memset(reg_hdr, 0, sizeof(*reg_hdr));
4217 		reg_hdr->start_entry = reg->start_entry;
4218 		reg_hdr->size = reg->entry_size;
4219 		SET_FIELD(reg_hdr->data,
4220 			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4221 			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4222 		SET_FIELD(reg_hdr->data,
4223 			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4224 
4225 		/* Write register values */
4226 		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4227 			dump_buf[offset] = cond_reg_values[next_reg_offset];
4228 	}
4229 
4230 	/* Dump info register values */
4231 	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4232 		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4233 		u32 block_id;
4234 
4235 		/* Check if register's block is in reset */
4236 		if (!dump) {
4237 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4238 			continue;
4239 		}
4240 
4241 		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4242 		if (block_id >= MAX_BLOCK_ID) {
4243 			DP_NOTICE(p_hwfn, "Invalid block_id\n");
4244 			return 0;
4245 		}
4246 
4247 		if (!dev_data->block_in_reset[block_id]) {
4248 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4249 			bool wide_bus, eval_mode, mode_match = true;
4250 			u16 modes_buf_offset;
4251 			u32 addr;
4252 
4253 			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4254 				  (dump_buf + offset);
4255 
4256 			/* Check mode */
4257 			eval_mode = GET_FIELD(reg->mode.data,
4258 					      DBG_MODE_HDR_EVAL_MODE) > 0;
4259 			if (eval_mode) {
4260 				modes_buf_offset =
4261 				    GET_FIELD(reg->mode.data,
4262 					      DBG_MODE_HDR_MODES_BUF_OFFSET);
4263 				mode_match =
4264 					qed_is_mode_match(p_hwfn,
4265 							  &modes_buf_offset);
4266 			}
4267 
4268 			if (!mode_match)
4269 				continue;
4270 
4271 			addr = GET_FIELD(reg->data,
4272 					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4273 			wide_bus = GET_FIELD(reg->data,
4274 					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4275 
4276 			/* Write register header */
4277 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4278 			hdr->num_dumped_info_regs++;
4279 			memset(reg_hdr, 0, sizeof(*reg_hdr));
4280 			reg_hdr->size = reg->size;
4281 			SET_FIELD(reg_hdr->data,
4282 				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4283 				  rule->num_cond_regs + reg_id);
4284 
4285 			/* Write register values */
4286 			offset += qed_grc_dump_addr_range(p_hwfn,
4287 							  p_ptt,
4288 							  dump_buf + offset,
4289 							  dump,
4290 							  addr,
4291 							  reg->size, wide_bus,
4292 							  SPLIT_TYPE_NONE, 0);
4293 		}
4294 	}
4295 
4296 	return offset;
4297 }
4298 
4299 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4300 static u32
4301 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4302 			       u32 *dump_buf, bool dump,
4303 			       const struct dbg_idle_chk_rule *input_rules,
4304 			       u32 num_input_rules, u32 *num_failing_rules)
4305 {
4306 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4307 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4308 	u32 i, offset = 0;
4309 	u16 entry_id;
4310 	u8 reg_id;
4311 
4312 	*num_failing_rules = 0;
4313 
4314 	for (i = 0; i < num_input_rules; i++) {
4315 		const struct dbg_idle_chk_cond_reg *cond_regs;
4316 		const struct dbg_idle_chk_rule *rule;
4317 		const union dbg_idle_chk_reg *regs;
4318 		u16 num_reg_entries = 1;
4319 		bool check_rule = true;
4320 		const u32 *imm_values;
4321 
4322 		rule = &input_rules[i];
4323 		regs = &((const union dbg_idle_chk_reg *)
4324 			 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4325 			[rule->reg_offset];
4326 		cond_regs = &regs[0].cond_reg;
4327 		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4328 			     [rule->imm_offset];
4329 
4330 		/* Check if all condition register blocks are out of reset, and
4331 		 * find maximal number of entries (all condition registers that
4332 		 * are memories must have the same size, which is > 1).
4333 		 */
4334 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4335 		     reg_id++) {
4336 			u32 block_id =
4337 				GET_FIELD(cond_regs[reg_id].data,
4338 					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4339 
4340 			if (block_id >= MAX_BLOCK_ID) {
4341 				DP_NOTICE(p_hwfn, "Invalid block_id\n");
4342 				return 0;
4343 			}
4344 
4345 			check_rule = !dev_data->block_in_reset[block_id];
4346 			if (cond_regs[reg_id].num_entries > num_reg_entries)
4347 				num_reg_entries = cond_regs[reg_id].num_entries;
4348 		}
4349 
4350 		if (!check_rule && dump)
4351 			continue;
4352 
4353 		if (!dump) {
4354 			u32 entry_dump_size =
4355 				qed_idle_chk_dump_failure(p_hwfn,
4356 							  p_ptt,
4357 							  dump_buf + offset,
4358 							  false,
4359 							  rule->rule_id,
4360 							  rule,
4361 							  0,
4362 							  NULL);
4363 
4364 			offset += num_reg_entries * entry_dump_size;
4365 			(*num_failing_rules) += num_reg_entries;
4366 			continue;
4367 		}
4368 
4369 		/* Go over all register entries (number of entries is the same
4370 		 * for all condition registers).
4371 		 */
4372 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4373 			u32 next_reg_offset = 0;
4374 
4375 			/* Read current entry of all condition registers */
4376 			for (reg_id = 0; reg_id < rule->num_cond_regs;
4377 			     reg_id++) {
4378 				const struct dbg_idle_chk_cond_reg *reg =
4379 					&cond_regs[reg_id];
4380 				u32 padded_entry_size, addr;
4381 				bool wide_bus;
4382 
4383 				/* Find GRC address (if it's a memory, the
4384 				 * address of the specific entry is calculated).
4385 				 */
4386 				addr = GET_FIELD(reg->data,
4387 						 DBG_IDLE_CHK_COND_REG_ADDRESS);
4388 				wide_bus =
4389 				    GET_FIELD(reg->data,
4390 					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4391 				if (reg->num_entries > 1 ||
4392 				    reg->start_entry > 0) {
4393 					padded_entry_size =
4394 					   reg->entry_size > 1 ?
4395 					   roundup_pow_of_two(reg->entry_size) :
4396 					   1;
4397 					addr += (reg->start_entry + entry_id) *
4398 						padded_entry_size;
4399 				}
4400 
4401 				/* Read registers */
4402 				if (next_reg_offset + reg->entry_size >=
4403 				    IDLE_CHK_MAX_ENTRIES_SIZE) {
4404 					DP_NOTICE(p_hwfn,
4405 						  "idle check registers entry is too large\n");
4406 					return 0;
4407 				}
4408 
4409 				next_reg_offset +=
4410 				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4411 							    cond_reg_values +
4412 							    next_reg_offset,
4413 							    dump, addr,
4414 							    reg->entry_size,
4415 							    wide_bus,
4416 							    SPLIT_TYPE_NONE, 0);
4417 			}
4418 
4419 			/* Call rule condition function.
4420 			 * If returns true, it's a failure.
4421 			 */
4422 			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4423 							imm_values)) {
4424 				offset += qed_idle_chk_dump_failure(p_hwfn,
4425 							p_ptt,
4426 							dump_buf + offset,
4427 							dump,
4428 							rule->rule_id,
4429 							rule,
4430 							entry_id,
4431 							cond_reg_values);
4432 				(*num_failing_rules)++;
4433 			}
4434 		}
4435 	}
4436 
4437 	return offset;
4438 }
4439 
4440 /* Performs Idle Check Dump to the specified buffer.
4441  * Returns the dumped size in dwords.
4442  */
4443 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4444 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4445 {
4446 	u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4447 	u32 num_failing_rules = 0;
4448 
4449 	/* Dump global params */
4450 	offset += qed_dump_common_global_params(p_hwfn,
4451 						p_ptt,
4452 						dump_buf + offset, dump, 1);
4453 	offset += qed_dump_str_param(dump_buf + offset,
4454 				     dump, "dump-type", "idle-chk");
4455 
4456 	/* Dump idle check section header with a single parameter */
4457 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4458 	num_failing_rules_offset = offset;
4459 	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4460 
4461 	while (input_offset <
4462 	       s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4463 		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4464 			(const struct dbg_idle_chk_cond_hdr *)
4465 			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4466 			[input_offset++];
4467 		bool eval_mode, mode_match = true;
4468 		u32 curr_failing_rules;
4469 		u16 modes_buf_offset;
4470 
4471 		/* Check mode */
4472 		eval_mode = GET_FIELD(cond_hdr->mode.data,
4473 				      DBG_MODE_HDR_EVAL_MODE) > 0;
4474 		if (eval_mode) {
4475 			modes_buf_offset =
4476 				GET_FIELD(cond_hdr->mode.data,
4477 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4478 			mode_match = qed_is_mode_match(p_hwfn,
4479 						       &modes_buf_offset);
4480 		}
4481 
4482 		if (mode_match) {
4483 			offset +=
4484 			    qed_idle_chk_dump_rule_entries(p_hwfn,
4485 				p_ptt,
4486 				dump_buf + offset,
4487 				dump,
4488 				(const struct dbg_idle_chk_rule *)
4489 				&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4490 				ptr[input_offset],
4491 				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4492 				&curr_failing_rules);
4493 			num_failing_rules += curr_failing_rules;
4494 		}
4495 
4496 		input_offset += cond_hdr->data_size;
4497 	}
4498 
4499 	/* Overwrite num_rules parameter */
4500 	if (dump)
4501 		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4502 				   dump, "num_rules", num_failing_rules);
4503 
4504 	/* Dump last section */
4505 	offset += qed_dump_last_section(dump_buf, offset, dump);
4506 
4507 	return offset;
4508 }
4509 
4510 /* Finds the meta data image in NVRAM */
4511 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4512 					    struct qed_ptt *p_ptt,
4513 					    u32 image_type,
4514 					    u32 *nvram_offset_bytes,
4515 					    u32 *nvram_size_bytes)
4516 {
4517 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4518 	struct mcp_file_att file_att;
4519 	int nvm_result;
4520 
4521 	/* Call NVRAM get file command */
4522 	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4523 					p_ptt,
4524 					DRV_MSG_CODE_NVM_GET_FILE_ATT,
4525 					image_type,
4526 					&ret_mcp_resp,
4527 					&ret_mcp_param,
4528 					&ret_txn_size, (u32 *)&file_att);
4529 
4530 	/* Check response */
4531 	if (nvm_result ||
4532 	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4533 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4534 
4535 	/* Update return values */
4536 	*nvram_offset_bytes = file_att.nvm_start_addr;
4537 	*nvram_size_bytes = file_att.len;
4538 
4539 	DP_VERBOSE(p_hwfn,
4540 		   QED_MSG_DEBUG,
4541 		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4542 		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
4543 
4544 	/* Check alignment */
4545 	if (*nvram_size_bytes & 0x3)
4546 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4547 
4548 	return DBG_STATUS_OK;
4549 }
4550 
4551 /* Reads data from NVRAM */
4552 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4553 				      struct qed_ptt *p_ptt,
4554 				      u32 nvram_offset_bytes,
4555 				      u32 nvram_size_bytes, u32 *ret_buf)
4556 {
4557 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4558 	s32 bytes_left = nvram_size_bytes;
4559 	u32 read_offset = 0;
4560 
4561 	DP_VERBOSE(p_hwfn,
4562 		   QED_MSG_DEBUG,
4563 		   "nvram_read: reading image of size %d bytes from NVRAM\n",
4564 		   nvram_size_bytes);
4565 
4566 	do {
4567 		bytes_to_copy =
4568 		    (bytes_left >
4569 		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4570 
4571 		/* Call NVRAM read command */
4572 		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4573 				       DRV_MSG_CODE_NVM_READ_NVRAM,
4574 				       (nvram_offset_bytes +
4575 					read_offset) |
4576 				       (bytes_to_copy <<
4577 					DRV_MB_PARAM_NVM_LEN_OFFSET),
4578 				       &ret_mcp_resp, &ret_mcp_param,
4579 				       &ret_read_size,
4580 				       (u32 *)((u8 *)ret_buf + read_offset)))
4581 			return DBG_STATUS_NVRAM_READ_FAILED;
4582 
4583 		/* Check response */
4584 		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4585 			return DBG_STATUS_NVRAM_READ_FAILED;
4586 
4587 		/* Update read offset */
4588 		read_offset += ret_read_size;
4589 		bytes_left -= ret_read_size;
4590 	} while (bytes_left > 0);
4591 
4592 	return DBG_STATUS_OK;
4593 }
4594 
4595 /* Get info on the MCP Trace data in the scratchpad:
4596  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4597  * - trace_data_size (OUT): trace data size in bytes (without the header)
4598  */
4599 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4600 						   struct qed_ptt *p_ptt,
4601 						   u32 *trace_data_grc_addr,
4602 						   u32 *trace_data_size)
4603 {
4604 	u32 spad_trace_offsize, signature;
4605 
4606 	/* Read trace section offsize structure from MCP scratchpad */
4607 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4608 
4609 	/* Extract trace section address from offsize (in scratchpad) */
4610 	*trace_data_grc_addr =
4611 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4612 
4613 	/* Read signature from MCP trace section */
4614 	signature = qed_rd(p_hwfn, p_ptt,
4615 			   *trace_data_grc_addr +
4616 			   offsetof(struct mcp_trace, signature));
4617 
4618 	if (signature != MFW_TRACE_SIGNATURE)
4619 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4620 
4621 	/* Read trace size from MCP trace section */
4622 	*trace_data_size = qed_rd(p_hwfn,
4623 				  p_ptt,
4624 				  *trace_data_grc_addr +
4625 				  offsetof(struct mcp_trace, size));
4626 
4627 	return DBG_STATUS_OK;
4628 }
4629 
4630 /* Reads MCP trace meta data image from NVRAM
4631  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4632  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4633  *			      loaded from file).
4634  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4635  */
4636 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4637 						   struct qed_ptt *p_ptt,
4638 						   u32 trace_data_size_bytes,
4639 						   u32 *running_bundle_id,
4640 						   u32 *trace_meta_offset,
4641 						   u32 *trace_meta_size)
4642 {
4643 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4644 
4645 	/* Read MCP trace section offsize structure from MCP scratchpad */
4646 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4647 
4648 	/* Find running bundle ID */
4649 	running_mfw_addr =
4650 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4651 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4652 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4653 	if (*running_bundle_id > 1)
4654 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4655 
4656 	/* Find image in NVRAM */
4657 	nvram_image_type =
4658 	    (*running_bundle_id ==
4659 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4660 	return qed_find_nvram_image(p_hwfn,
4661 				    p_ptt,
4662 				    nvram_image_type,
4663 				    trace_meta_offset, trace_meta_size);
4664 }
4665 
4666 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4667 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4668 					       struct qed_ptt *p_ptt,
4669 					       u32 nvram_offset_in_bytes,
4670 					       u32 size_in_bytes, u32 *buf)
4671 {
4672 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4673 	enum dbg_status status;
4674 	u32 signature;
4675 
4676 	/* Read meta data from NVRAM */
4677 	status = qed_nvram_read(p_hwfn,
4678 				p_ptt,
4679 				nvram_offset_in_bytes, size_in_bytes, buf);
4680 	if (status != DBG_STATUS_OK)
4681 		return status;
4682 
4683 	/* Extract and check first signature */
4684 	signature = qed_read_unaligned_dword(byte_buf);
4685 	byte_buf += sizeof(signature);
4686 	if (signature != NVM_MAGIC_VALUE)
4687 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4688 
4689 	/* Extract number of modules */
4690 	modules_num = *(byte_buf++);
4691 
4692 	/* Skip all modules */
4693 	for (i = 0; i < modules_num; i++) {
4694 		module_len = *(byte_buf++);
4695 		byte_buf += module_len;
4696 	}
4697 
4698 	/* Extract and check second signature */
4699 	signature = qed_read_unaligned_dword(byte_buf);
4700 	byte_buf += sizeof(signature);
4701 	if (signature != NVM_MAGIC_VALUE)
4702 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4703 
4704 	return DBG_STATUS_OK;
4705 }
4706 
4707 /* Dump MCP Trace */
4708 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4709 					  struct qed_ptt *p_ptt,
4710 					  u32 *dump_buf,
4711 					  bool dump, u32 *num_dumped_dwords)
4712 {
4713 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4714 	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4715 	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4716 	enum dbg_status status;
4717 	bool mcp_access;
4718 	int halted = 0;
4719 
4720 	*num_dumped_dwords = 0;
4721 
4722 	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4723 
4724 	/* Get trace data info */
4725 	status = qed_mcp_trace_get_data_info(p_hwfn,
4726 					     p_ptt,
4727 					     &trace_data_grc_addr,
4728 					     &trace_data_size_bytes);
4729 	if (status != DBG_STATUS_OK)
4730 		return status;
4731 
4732 	/* Dump global params */
4733 	offset += qed_dump_common_global_params(p_hwfn,
4734 						p_ptt,
4735 						dump_buf + offset, dump, 1);
4736 	offset += qed_dump_str_param(dump_buf + offset,
4737 				     dump, "dump-type", "mcp-trace");
4738 
4739 	/* Halt MCP while reading from scratchpad so the read data will be
4740 	 * consistent. if halt fails, MCP trace is taken anyway, with a small
4741 	 * risk that it may be corrupt.
4742 	 */
4743 	if (dump && mcp_access) {
4744 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4745 		if (!halted)
4746 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4747 	}
4748 
4749 	/* Find trace data size */
4750 	trace_data_size_dwords =
4751 	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4752 			 BYTES_IN_DWORD);
4753 
4754 	/* Dump trace data section header and param */
4755 	offset += qed_dump_section_hdr(dump_buf + offset,
4756 				       dump, "mcp_trace_data", 1);
4757 	offset += qed_dump_num_param(dump_buf + offset,
4758 				     dump, "size", trace_data_size_dwords);
4759 
4760 	/* Read trace data from scratchpad into dump buffer */
4761 	offset += qed_grc_dump_addr_range(p_hwfn,
4762 					  p_ptt,
4763 					  dump_buf + offset,
4764 					  dump,
4765 					  BYTES_TO_DWORDS(trace_data_grc_addr),
4766 					  trace_data_size_dwords, false,
4767 					  SPLIT_TYPE_NONE, 0);
4768 
4769 	/* Resume MCP (only if halt succeeded) */
4770 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4771 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4772 
4773 	/* Dump trace meta section header */
4774 	offset += qed_dump_section_hdr(dump_buf + offset,
4775 				       dump, "mcp_trace_meta", 1);
4776 
4777 	/* If MCP Trace meta size parameter was set, use it.
4778 	 * Otherwise, read trace meta.
4779 	 * trace_meta_size_bytes is dword-aligned.
4780 	 */
4781 	trace_meta_size_bytes =
4782 		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4783 	if ((!trace_meta_size_bytes || dump) && mcp_access) {
4784 		status = qed_mcp_trace_get_meta_info(p_hwfn,
4785 						     p_ptt,
4786 						     trace_data_size_bytes,
4787 						     &running_bundle_id,
4788 						     &trace_meta_offset_bytes,
4789 						     &trace_meta_size_bytes);
4790 		if (status == DBG_STATUS_OK)
4791 			trace_meta_size_dwords =
4792 				BYTES_TO_DWORDS(trace_meta_size_bytes);
4793 	}
4794 
4795 	/* Dump trace meta size param */
4796 	offset += qed_dump_num_param(dump_buf + offset,
4797 				     dump, "size", trace_meta_size_dwords);
4798 
4799 	/* Read trace meta image into dump buffer */
4800 	if (dump && trace_meta_size_dwords)
4801 		status = qed_mcp_trace_read_meta(p_hwfn,
4802 						 p_ptt,
4803 						 trace_meta_offset_bytes,
4804 						 trace_meta_size_bytes,
4805 						 dump_buf + offset);
4806 	if (status == DBG_STATUS_OK)
4807 		offset += trace_meta_size_dwords;
4808 
4809 	/* Dump last section */
4810 	offset += qed_dump_last_section(dump_buf, offset, dump);
4811 
4812 	*num_dumped_dwords = offset;
4813 
4814 	/* If no mcp access, indicate that the dump doesn't contain the meta
4815 	 * data from NVRAM.
4816 	 */
4817 	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4818 }
4819 
4820 /* Dump GRC FIFO */
4821 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4822 					 struct qed_ptt *p_ptt,
4823 					 u32 *dump_buf,
4824 					 bool dump, u32 *num_dumped_dwords)
4825 {
4826 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4827 	bool fifo_has_data;
4828 
4829 	*num_dumped_dwords = 0;
4830 
4831 	/* Dump global params */
4832 	offset += qed_dump_common_global_params(p_hwfn,
4833 						p_ptt,
4834 						dump_buf + offset, dump, 1);
4835 	offset += qed_dump_str_param(dump_buf + offset,
4836 				     dump, "dump-type", "reg-fifo");
4837 
4838 	/* Dump fifo data section header and param. The size param is 0 for
4839 	 * now, and is overwritten after reading the FIFO.
4840 	 */
4841 	offset += qed_dump_section_hdr(dump_buf + offset,
4842 				       dump, "reg_fifo_data", 1);
4843 	size_param_offset = offset;
4844 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4845 
4846 	if (!dump) {
4847 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4848 		 * test how much data is available, except for reading it.
4849 		 */
4850 		offset += REG_FIFO_DEPTH_DWORDS;
4851 		goto out;
4852 	}
4853 
4854 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4855 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4856 
4857 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4858 	 * and must be accessed atomically. Test for dwords_read not passing
4859 	 * buffer size since more entries could be added to the buffer as we are
4860 	 * emptying it.
4861 	 */
4862 	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4863 	len = REG_FIFO_ELEMENT_DWORDS;
4864 	for (dwords_read = 0;
4865 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4866 	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4867 		offset += qed_grc_dump_addr_range(p_hwfn,
4868 						  p_ptt,
4869 						  dump_buf + offset,
4870 						  true,
4871 						  addr,
4872 						  len,
4873 						  true, SPLIT_TYPE_NONE,
4874 						  0);
4875 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4876 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4877 	}
4878 
4879 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4880 			   dwords_read);
4881 out:
4882 	/* Dump last section */
4883 	offset += qed_dump_last_section(dump_buf, offset, dump);
4884 
4885 	*num_dumped_dwords = offset;
4886 
4887 	return DBG_STATUS_OK;
4888 }
4889 
4890 /* Dump IGU FIFO */
4891 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4892 					 struct qed_ptt *p_ptt,
4893 					 u32 *dump_buf,
4894 					 bool dump, u32 *num_dumped_dwords)
4895 {
4896 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4897 	bool fifo_has_data;
4898 
4899 	*num_dumped_dwords = 0;
4900 
4901 	/* Dump global params */
4902 	offset += qed_dump_common_global_params(p_hwfn,
4903 						p_ptt,
4904 						dump_buf + offset, dump, 1);
4905 	offset += qed_dump_str_param(dump_buf + offset,
4906 				     dump, "dump-type", "igu-fifo");
4907 
4908 	/* Dump fifo data section header and param. The size param is 0 for
4909 	 * now, and is overwritten after reading the FIFO.
4910 	 */
4911 	offset += qed_dump_section_hdr(dump_buf + offset,
4912 				       dump, "igu_fifo_data", 1);
4913 	size_param_offset = offset;
4914 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4915 
4916 	if (!dump) {
4917 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4918 		 * test how much data is available, except for reading it.
4919 		 */
4920 		offset += IGU_FIFO_DEPTH_DWORDS;
4921 		goto out;
4922 	}
4923 
4924 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4925 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4926 
4927 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4928 	 * and must be accessed atomically. Test for dwords_read not passing
4929 	 * buffer size since more entries could be added to the buffer as we are
4930 	 * emptying it.
4931 	 */
4932 	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4933 	len = IGU_FIFO_ELEMENT_DWORDS;
4934 	for (dwords_read = 0;
4935 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4936 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4937 		offset += qed_grc_dump_addr_range(p_hwfn,
4938 						  p_ptt,
4939 						  dump_buf + offset,
4940 						  true,
4941 						  addr,
4942 						  len,
4943 						  true, SPLIT_TYPE_NONE,
4944 						  0);
4945 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4946 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4947 	}
4948 
4949 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4950 			   dwords_read);
4951 out:
4952 	/* Dump last section */
4953 	offset += qed_dump_last_section(dump_buf, offset, dump);
4954 
4955 	*num_dumped_dwords = offset;
4956 
4957 	return DBG_STATUS_OK;
4958 }
4959 
4960 /* Protection Override dump */
4961 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4962 						    struct qed_ptt *p_ptt,
4963 						    u32 *dump_buf,
4964 						    bool dump,
4965 						    u32 *num_dumped_dwords)
4966 {
4967 	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4968 
4969 	*num_dumped_dwords = 0;
4970 
4971 	/* Dump global params */
4972 	offset += qed_dump_common_global_params(p_hwfn,
4973 						p_ptt,
4974 						dump_buf + offset, dump, 1);
4975 	offset += qed_dump_str_param(dump_buf + offset,
4976 				     dump, "dump-type", "protection-override");
4977 
4978 	/* Dump data section header and param. The size param is 0 for now,
4979 	 * and is overwritten after reading the data.
4980 	 */
4981 	offset += qed_dump_section_hdr(dump_buf + offset,
4982 				       dump, "protection_override_data", 1);
4983 	size_param_offset = offset;
4984 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4985 
4986 	if (!dump) {
4987 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4988 		goto out;
4989 	}
4990 
4991 	/* Add override window info to buffer */
4992 	override_window_dwords =
4993 		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4994 		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4995 	addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4996 	offset += qed_grc_dump_addr_range(p_hwfn,
4997 					  p_ptt,
4998 					  dump_buf + offset,
4999 					  true,
5000 					  addr,
5001 					  override_window_dwords,
5002 					  true, SPLIT_TYPE_NONE, 0);
5003 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
5004 			   override_window_dwords);
5005 out:
5006 	/* Dump last section */
5007 	offset += qed_dump_last_section(dump_buf, offset, dump);
5008 
5009 	*num_dumped_dwords = offset;
5010 
5011 	return DBG_STATUS_OK;
5012 }
5013 
5014 /* Performs FW Asserts Dump to the specified buffer.
5015  * Returns the dumped size in dwords.
5016  */
5017 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5018 			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
5019 {
5020 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5021 	struct fw_asserts_ram_section *asserts;
5022 	char storm_letter_str[2] = "?";
5023 	struct fw_info fw_info;
5024 	u32 offset = 0;
5025 	u8 storm_id;
5026 
5027 	/* Dump global params */
5028 	offset += qed_dump_common_global_params(p_hwfn,
5029 						p_ptt,
5030 						dump_buf + offset, dump, 1);
5031 	offset += qed_dump_str_param(dump_buf + offset,
5032 				     dump, "dump-type", "fw-asserts");
5033 
5034 	/* Find Storm dump size */
5035 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5036 		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
5037 		struct storm_defs *storm = &s_storm_defs[storm_id];
5038 		u32 last_list_idx, addr;
5039 
5040 		if (dev_data->block_in_reset[storm->block_id])
5041 			continue;
5042 
5043 		/* Read FW info for the current Storm */
5044 		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
5045 
5046 		asserts = &fw_info.fw_asserts_section;
5047 
5048 		/* Dump FW Asserts section header and params */
5049 		storm_letter_str[0] = storm->letter;
5050 		offset += qed_dump_section_hdr(dump_buf + offset,
5051 					       dump, "fw_asserts", 2);
5052 		offset += qed_dump_str_param(dump_buf + offset,
5053 					     dump, "storm", storm_letter_str);
5054 		offset += qed_dump_num_param(dump_buf + offset,
5055 					     dump,
5056 					     "size",
5057 					     asserts->list_element_dword_size);
5058 
5059 		/* Read and dump FW Asserts data */
5060 		if (!dump) {
5061 			offset += asserts->list_element_dword_size;
5062 			continue;
5063 		}
5064 
5065 		fw_asserts_section_addr = storm->sem_fast_mem_addr +
5066 			SEM_FAST_REG_INT_RAM +
5067 			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
5068 		next_list_idx_addr = fw_asserts_section_addr +
5069 			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
5070 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
5071 		last_list_idx = (next_list_idx > 0 ?
5072 				 next_list_idx :
5073 				 asserts->list_num_elements) - 1;
5074 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
5075 		       asserts->list_dword_offset +
5076 		       last_list_idx * asserts->list_element_dword_size;
5077 		offset +=
5078 		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
5079 					    dump_buf + offset,
5080 					    dump, addr,
5081 					    asserts->list_element_dword_size,
5082 						  false, SPLIT_TYPE_NONE, 0);
5083 	}
5084 
5085 	/* Dump last section */
5086 	offset += qed_dump_last_section(dump_buf, offset, dump);
5087 
5088 	return offset;
5089 }
5090 
5091 /***************************** Public Functions *******************************/
5092 
5093 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
5094 {
5095 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5096 	u8 buf_id;
5097 
5098 	/* convert binary data to debug arrays */
5099 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5100 		s_dbg_arrays[buf_id].ptr =
5101 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
5102 		s_dbg_arrays[buf_id].size_in_dwords =
5103 		    BYTES_TO_DWORDS(buf_array[buf_id].length);
5104 	}
5105 
5106 	return DBG_STATUS_OK;
5107 }
5108 
5109 bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
5110 		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
5111 {
5112 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5113 	u8 storm_id;
5114 
5115 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5116 		struct storm_defs *storm = &s_storm_defs[storm_id];
5117 
5118 		/* Skip Storm if it's in reset */
5119 		if (dev_data->block_in_reset[storm->block_id])
5120 			continue;
5121 
5122 		/* Read FW info for the current Storm */
5123 		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
5124 
5125 		return true;
5126 	}
5127 
5128 	return false;
5129 }
5130 
5131 enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
5132 				   struct qed_ptt *p_ptt,
5133 				   enum dbg_grc_params grc_param, u32 val)
5134 {
5135 	enum dbg_status status;
5136 	int i;
5137 
5138 	DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
5139 		   "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
5140 
5141 	status = qed_dbg_dev_init(p_hwfn, p_ptt);
5142 	if (status != DBG_STATUS_OK)
5143 		return status;
5144 
5145 	/* Initializes the GRC parameters (if not initialized). Needed in order
5146 	 * to set the default parameter values for the first time.
5147 	 */
5148 	qed_dbg_grc_init_params(p_hwfn);
5149 
5150 	if (grc_param >= MAX_DBG_GRC_PARAMS)
5151 		return DBG_STATUS_INVALID_ARGS;
5152 	if (val < s_grc_param_defs[grc_param].min ||
5153 	    val > s_grc_param_defs[grc_param].max)
5154 		return DBG_STATUS_INVALID_ARGS;
5155 
5156 	if (s_grc_param_defs[grc_param].is_preset) {
5157 		/* Preset param */
5158 
5159 		/* Disabling a preset is not allowed. Call
5160 		 * dbg_grc_set_params_default instead.
5161 		 */
5162 		if (!val)
5163 			return DBG_STATUS_INVALID_ARGS;
5164 
5165 		/* Update all params with the preset values */
5166 		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
5167 			u32 preset_val;
5168 
5169 			/* Skip persistent params */
5170 			if (s_grc_param_defs[i].is_persistent)
5171 				continue;
5172 
5173 			/* Find preset value */
5174 			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
5175 				preset_val =
5176 				    s_grc_param_defs[i].exclude_all_preset_val;
5177 			else if (grc_param == DBG_GRC_PARAM_CRASH)
5178 				preset_val =
5179 				    s_grc_param_defs[i].crash_preset_val;
5180 			else
5181 				return DBG_STATUS_INVALID_ARGS;
5182 
5183 			qed_grc_set_param(p_hwfn,
5184 					  (enum dbg_grc_params)i, preset_val);
5185 		}
5186 	} else {
5187 		/* Regular param - set its value */
5188 		qed_grc_set_param(p_hwfn, grc_param, val);
5189 	}
5190 
5191 	return DBG_STATUS_OK;
5192 }
5193 
5194 /* Assign default GRC param values */
5195 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5196 {
5197 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5198 	u32 i;
5199 
5200 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5201 		if (!s_grc_param_defs[i].is_persistent)
5202 			dev_data->grc.param_val[i] =
5203 			    s_grc_param_defs[i].default_val[dev_data->chip_id];
5204 }
5205 
5206 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5207 					      struct qed_ptt *p_ptt,
5208 					      u32 *buf_size)
5209 {
5210 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5211 
5212 	*buf_size = 0;
5213 
5214 	if (status != DBG_STATUS_OK)
5215 		return status;
5216 
5217 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5218 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5219 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5220 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5221 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5222 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5223 
5224 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5225 }
5226 
5227 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5228 				 struct qed_ptt *p_ptt,
5229 				 u32 *dump_buf,
5230 				 u32 buf_size_in_dwords,
5231 				 u32 *num_dumped_dwords)
5232 {
5233 	u32 needed_buf_size_in_dwords;
5234 	enum dbg_status status;
5235 
5236 	*num_dumped_dwords = 0;
5237 
5238 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5239 					       p_ptt,
5240 					       &needed_buf_size_in_dwords);
5241 	if (status != DBG_STATUS_OK)
5242 		return status;
5243 
5244 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5245 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5246 
5247 	/* GRC Dump */
5248 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5249 
5250 	/* Revert GRC params to their default */
5251 	qed_dbg_grc_set_params_default(p_hwfn);
5252 
5253 	return status;
5254 }
5255 
5256 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5257 						   struct qed_ptt *p_ptt,
5258 						   u32 *buf_size)
5259 {
5260 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5261 	struct idle_chk_data *idle_chk;
5262 	enum dbg_status status;
5263 
5264 	idle_chk = &dev_data->idle_chk;
5265 	*buf_size = 0;
5266 
5267 	status = qed_dbg_dev_init(p_hwfn, p_ptt);
5268 	if (status != DBG_STATUS_OK)
5269 		return status;
5270 
5271 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5272 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5273 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5274 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5275 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5276 
5277 	if (!idle_chk->buf_size_set) {
5278 		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5279 						       p_ptt, NULL, false);
5280 		idle_chk->buf_size_set = true;
5281 	}
5282 
5283 	*buf_size = idle_chk->buf_size;
5284 
5285 	return DBG_STATUS_OK;
5286 }
5287 
5288 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5289 				      struct qed_ptt *p_ptt,
5290 				      u32 *dump_buf,
5291 				      u32 buf_size_in_dwords,
5292 				      u32 *num_dumped_dwords)
5293 {
5294 	u32 needed_buf_size_in_dwords;
5295 	enum dbg_status status;
5296 
5297 	*num_dumped_dwords = 0;
5298 
5299 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5300 						    p_ptt,
5301 						    &needed_buf_size_in_dwords);
5302 	if (status != DBG_STATUS_OK)
5303 		return status;
5304 
5305 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5306 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5307 
5308 	/* Update reset state */
5309 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5310 
5311 	/* Idle Check Dump */
5312 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5313 
5314 	/* Revert GRC params to their default */
5315 	qed_dbg_grc_set_params_default(p_hwfn);
5316 
5317 	return DBG_STATUS_OK;
5318 }
5319 
5320 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5321 						    struct qed_ptt *p_ptt,
5322 						    u32 *buf_size)
5323 {
5324 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5325 
5326 	*buf_size = 0;
5327 
5328 	if (status != DBG_STATUS_OK)
5329 		return status;
5330 
5331 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5332 }
5333 
5334 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5335 				       struct qed_ptt *p_ptt,
5336 				       u32 *dump_buf,
5337 				       u32 buf_size_in_dwords,
5338 				       u32 *num_dumped_dwords)
5339 {
5340 	u32 needed_buf_size_in_dwords;
5341 	enum dbg_status status;
5342 
5343 	status =
5344 		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5345 						    p_ptt,
5346 						    &needed_buf_size_in_dwords);
5347 	if (status != DBG_STATUS_OK && status !=
5348 	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5349 		return status;
5350 
5351 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5352 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5353 
5354 	/* Update reset state */
5355 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5356 
5357 	/* Perform dump */
5358 	status = qed_mcp_trace_dump(p_hwfn,
5359 				    p_ptt, dump_buf, true, num_dumped_dwords);
5360 
5361 	/* Revert GRC params to their default */
5362 	qed_dbg_grc_set_params_default(p_hwfn);
5363 
5364 	return status;
5365 }
5366 
5367 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5368 						   struct qed_ptt *p_ptt,
5369 						   u32 *buf_size)
5370 {
5371 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5372 
5373 	*buf_size = 0;
5374 
5375 	if (status != DBG_STATUS_OK)
5376 		return status;
5377 
5378 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5379 }
5380 
5381 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5382 				      struct qed_ptt *p_ptt,
5383 				      u32 *dump_buf,
5384 				      u32 buf_size_in_dwords,
5385 				      u32 *num_dumped_dwords)
5386 {
5387 	u32 needed_buf_size_in_dwords;
5388 	enum dbg_status status;
5389 
5390 	*num_dumped_dwords = 0;
5391 
5392 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5393 						    p_ptt,
5394 						    &needed_buf_size_in_dwords);
5395 	if (status != DBG_STATUS_OK)
5396 		return status;
5397 
5398 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5399 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5400 
5401 	/* Update reset state */
5402 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5403 
5404 	status = qed_reg_fifo_dump(p_hwfn,
5405 				   p_ptt, dump_buf, true, num_dumped_dwords);
5406 
5407 	/* Revert GRC params to their default */
5408 	qed_dbg_grc_set_params_default(p_hwfn);
5409 
5410 	return status;
5411 }
5412 
5413 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5414 						   struct qed_ptt *p_ptt,
5415 						   u32 *buf_size)
5416 {
5417 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5418 
5419 	*buf_size = 0;
5420 
5421 	if (status != DBG_STATUS_OK)
5422 		return status;
5423 
5424 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5425 }
5426 
5427 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5428 				      struct qed_ptt *p_ptt,
5429 				      u32 *dump_buf,
5430 				      u32 buf_size_in_dwords,
5431 				      u32 *num_dumped_dwords)
5432 {
5433 	u32 needed_buf_size_in_dwords;
5434 	enum dbg_status status;
5435 
5436 	*num_dumped_dwords = 0;
5437 
5438 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5439 						    p_ptt,
5440 						    &needed_buf_size_in_dwords);
5441 	if (status != DBG_STATUS_OK)
5442 		return status;
5443 
5444 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5445 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5446 
5447 	/* Update reset state */
5448 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5449 
5450 	status = qed_igu_fifo_dump(p_hwfn,
5451 				   p_ptt, dump_buf, true, num_dumped_dwords);
5452 	/* Revert GRC params to their default */
5453 	qed_dbg_grc_set_params_default(p_hwfn);
5454 
5455 	return status;
5456 }
5457 
5458 enum dbg_status
5459 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5460 					      struct qed_ptt *p_ptt,
5461 					      u32 *buf_size)
5462 {
5463 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5464 
5465 	*buf_size = 0;
5466 
5467 	if (status != DBG_STATUS_OK)
5468 		return status;
5469 
5470 	return qed_protection_override_dump(p_hwfn,
5471 					    p_ptt, NULL, false, buf_size);
5472 }
5473 
5474 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5475 						 struct qed_ptt *p_ptt,
5476 						 u32 *dump_buf,
5477 						 u32 buf_size_in_dwords,
5478 						 u32 *num_dumped_dwords)
5479 {
5480 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5481 	enum dbg_status status;
5482 
5483 	*num_dumped_dwords = 0;
5484 
5485 	status =
5486 		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5487 							      p_ptt,
5488 							      p_size);
5489 	if (status != DBG_STATUS_OK)
5490 		return status;
5491 
5492 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5493 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5494 
5495 	/* Update reset state */
5496 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5497 
5498 	status = qed_protection_override_dump(p_hwfn,
5499 					      p_ptt,
5500 					      dump_buf,
5501 					      true, num_dumped_dwords);
5502 
5503 	/* Revert GRC params to their default */
5504 	qed_dbg_grc_set_params_default(p_hwfn);
5505 
5506 	return status;
5507 }
5508 
5509 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5510 						     struct qed_ptt *p_ptt,
5511 						     u32 *buf_size)
5512 {
5513 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5514 
5515 	*buf_size = 0;
5516 
5517 	if (status != DBG_STATUS_OK)
5518 		return status;
5519 
5520 	/* Update reset state */
5521 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5522 
5523 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5524 
5525 	return DBG_STATUS_OK;
5526 }
5527 
5528 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5529 					struct qed_ptt *p_ptt,
5530 					u32 *dump_buf,
5531 					u32 buf_size_in_dwords,
5532 					u32 *num_dumped_dwords)
5533 {
5534 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5535 	enum dbg_status status;
5536 
5537 	*num_dumped_dwords = 0;
5538 
5539 	status =
5540 		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5541 						     p_ptt,
5542 						     p_size);
5543 	if (status != DBG_STATUS_OK)
5544 		return status;
5545 
5546 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5547 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5548 
5549 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5550 
5551 	/* Revert GRC params to their default */
5552 	qed_dbg_grc_set_params_default(p_hwfn);
5553 
5554 	return DBG_STATUS_OK;
5555 }
5556 
5557 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5558 				  struct qed_ptt *p_ptt,
5559 				  enum block_id block_id,
5560 				  enum dbg_attn_type attn_type,
5561 				  bool clear_status,
5562 				  struct dbg_attn_block_result *results)
5563 {
5564 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5565 	u8 reg_idx, num_attn_regs, num_result_regs = 0;
5566 	const struct dbg_attn_reg *attn_reg_arr;
5567 
5568 	if (status != DBG_STATUS_OK)
5569 		return status;
5570 
5571 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5572 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5573 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5574 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5575 
5576 	attn_reg_arr = qed_get_block_attn_regs(block_id,
5577 					       attn_type, &num_attn_regs);
5578 
5579 	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5580 		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5581 		struct dbg_attn_reg_result *reg_result;
5582 		u32 sts_addr, sts_val;
5583 		u16 modes_buf_offset;
5584 		bool eval_mode;
5585 
5586 		/* Check mode */
5587 		eval_mode = GET_FIELD(reg_data->mode.data,
5588 				      DBG_MODE_HDR_EVAL_MODE) > 0;
5589 		modes_buf_offset = GET_FIELD(reg_data->mode.data,
5590 					     DBG_MODE_HDR_MODES_BUF_OFFSET);
5591 		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5592 			continue;
5593 
5594 		/* Mode match - read attention status register */
5595 		sts_addr = DWORDS_TO_BYTES(clear_status ?
5596 					   reg_data->sts_clr_address :
5597 					   GET_FIELD(reg_data->data,
5598 						     DBG_ATTN_REG_STS_ADDRESS));
5599 		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5600 		if (!sts_val)
5601 			continue;
5602 
5603 		/* Non-zero attention status - add to results */
5604 		reg_result = &results->reg_results[num_result_regs];
5605 		SET_FIELD(reg_result->data,
5606 			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5607 		SET_FIELD(reg_result->data,
5608 			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5609 			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5610 		reg_result->block_attn_offset = reg_data->block_attn_offset;
5611 		reg_result->sts_val = sts_val;
5612 		reg_result->mask_val = qed_rd(p_hwfn,
5613 					      p_ptt,
5614 					      DWORDS_TO_BYTES
5615 					      (reg_data->mask_address));
5616 		num_result_regs++;
5617 	}
5618 
5619 	results->block_id = (u8)block_id;
5620 	results->names_offset =
5621 	    qed_get_block_attn_data(block_id, attn_type)->names_offset;
5622 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5623 	SET_FIELD(results->data,
5624 		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5625 
5626 	return DBG_STATUS_OK;
5627 }
5628 
5629 /******************************* Data Types **********************************/
5630 
5631 struct block_info {
5632 	const char *name;
5633 	enum block_id id;
5634 };
5635 
5636 /* REG fifo element */
5637 struct reg_fifo_element {
5638 	u64 data;
5639 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5640 #define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5641 #define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5642 #define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5643 #define REG_FIFO_ELEMENT_PF_SHIFT		24
5644 #define REG_FIFO_ELEMENT_PF_MASK		0xf
5645 #define REG_FIFO_ELEMENT_VF_SHIFT		28
5646 #define REG_FIFO_ELEMENT_VF_MASK		0xff
5647 #define REG_FIFO_ELEMENT_PORT_SHIFT		36
5648 #define REG_FIFO_ELEMENT_PORT_MASK		0x3
5649 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5650 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5651 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5652 #define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5653 #define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5654 #define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5655 #define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5656 #define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5657 };
5658 
5659 /* IGU fifo element */
5660 struct igu_fifo_element {
5661 	u32 dword0;
5662 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5663 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5664 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5665 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5666 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5667 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5668 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5669 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5670 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5671 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5672 	u32 dword1;
5673 	u32 dword2;
5674 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5675 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5676 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5677 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5678 	u32 reserved;
5679 };
5680 
5681 struct igu_fifo_wr_data {
5682 	u32 data;
5683 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5684 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5685 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5686 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5687 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5688 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5689 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5690 #define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5691 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5692 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5693 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5694 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5695 };
5696 
5697 struct igu_fifo_cleanup_wr_data {
5698 	u32 data;
5699 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5700 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5701 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5702 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5703 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5704 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5705 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5706 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5707 };
5708 
5709 /* Protection override element */
5710 struct protection_override_element {
5711 	u64 data;
5712 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5713 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5714 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5715 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5716 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5717 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5718 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5719 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5720 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5721 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5722 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5723 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5724 };
5725 
5726 enum igu_fifo_sources {
5727 	IGU_SRC_PXP0,
5728 	IGU_SRC_PXP1,
5729 	IGU_SRC_PXP2,
5730 	IGU_SRC_PXP3,
5731 	IGU_SRC_PXP4,
5732 	IGU_SRC_PXP5,
5733 	IGU_SRC_PXP6,
5734 	IGU_SRC_PXP7,
5735 	IGU_SRC_CAU,
5736 	IGU_SRC_ATTN,
5737 	IGU_SRC_GRC
5738 };
5739 
5740 enum igu_fifo_addr_types {
5741 	IGU_ADDR_TYPE_MSIX_MEM,
5742 	IGU_ADDR_TYPE_WRITE_PBA,
5743 	IGU_ADDR_TYPE_WRITE_INT_ACK,
5744 	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5745 	IGU_ADDR_TYPE_READ_INT,
5746 	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5747 	IGU_ADDR_TYPE_RESERVED
5748 };
5749 
5750 struct igu_fifo_addr_data {
5751 	u16 start_addr;
5752 	u16 end_addr;
5753 	char *desc;
5754 	char *vf_desc;
5755 	enum igu_fifo_addr_types type;
5756 };
5757 
5758 struct mcp_trace_meta {
5759 	u32 modules_num;
5760 	char **modules;
5761 	u32 formats_num;
5762 	struct mcp_trace_format *formats;
5763 	bool is_allocated;
5764 };
5765 
5766 /* Debug Tools user data */
5767 struct dbg_tools_user_data {
5768 	struct mcp_trace_meta mcp_trace_meta;
5769 	const u32 *mcp_trace_user_meta_buf;
5770 };
5771 
5772 /******************************** Constants **********************************/
5773 
5774 #define MAX_MSG_LEN				1024
5775 
5776 #define MCP_TRACE_MAX_MODULE_LEN		8
5777 #define MCP_TRACE_FORMAT_MAX_PARAMS		3
5778 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5779 	(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
5780 
5781 #define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5782 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5783 
5784 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5785 
5786 /***************************** Constant Arrays *******************************/
5787 
5788 struct user_dbg_array {
5789 	const u32 *ptr;
5790 	u32 size_in_dwords;
5791 };
5792 
5793 /* Debug arrays */
5794 static struct user_dbg_array
5795 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5796 
5797 /* Block names array */
5798 static struct block_info s_block_info_arr[] = {
5799 	{"grc", BLOCK_GRC},
5800 	{"miscs", BLOCK_MISCS},
5801 	{"misc", BLOCK_MISC},
5802 	{"dbu", BLOCK_DBU},
5803 	{"pglue_b", BLOCK_PGLUE_B},
5804 	{"cnig", BLOCK_CNIG},
5805 	{"cpmu", BLOCK_CPMU},
5806 	{"ncsi", BLOCK_NCSI},
5807 	{"opte", BLOCK_OPTE},
5808 	{"bmb", BLOCK_BMB},
5809 	{"pcie", BLOCK_PCIE},
5810 	{"mcp", BLOCK_MCP},
5811 	{"mcp2", BLOCK_MCP2},
5812 	{"pswhst", BLOCK_PSWHST},
5813 	{"pswhst2", BLOCK_PSWHST2},
5814 	{"pswrd", BLOCK_PSWRD},
5815 	{"pswrd2", BLOCK_PSWRD2},
5816 	{"pswwr", BLOCK_PSWWR},
5817 	{"pswwr2", BLOCK_PSWWR2},
5818 	{"pswrq", BLOCK_PSWRQ},
5819 	{"pswrq2", BLOCK_PSWRQ2},
5820 	{"pglcs", BLOCK_PGLCS},
5821 	{"ptu", BLOCK_PTU},
5822 	{"dmae", BLOCK_DMAE},
5823 	{"tcm", BLOCK_TCM},
5824 	{"mcm", BLOCK_MCM},
5825 	{"ucm", BLOCK_UCM},
5826 	{"xcm", BLOCK_XCM},
5827 	{"ycm", BLOCK_YCM},
5828 	{"pcm", BLOCK_PCM},
5829 	{"qm", BLOCK_QM},
5830 	{"tm", BLOCK_TM},
5831 	{"dorq", BLOCK_DORQ},
5832 	{"brb", BLOCK_BRB},
5833 	{"src", BLOCK_SRC},
5834 	{"prs", BLOCK_PRS},
5835 	{"tsdm", BLOCK_TSDM},
5836 	{"msdm", BLOCK_MSDM},
5837 	{"usdm", BLOCK_USDM},
5838 	{"xsdm", BLOCK_XSDM},
5839 	{"ysdm", BLOCK_YSDM},
5840 	{"psdm", BLOCK_PSDM},
5841 	{"tsem", BLOCK_TSEM},
5842 	{"msem", BLOCK_MSEM},
5843 	{"usem", BLOCK_USEM},
5844 	{"xsem", BLOCK_XSEM},
5845 	{"ysem", BLOCK_YSEM},
5846 	{"psem", BLOCK_PSEM},
5847 	{"rss", BLOCK_RSS},
5848 	{"tmld", BLOCK_TMLD},
5849 	{"muld", BLOCK_MULD},
5850 	{"yuld", BLOCK_YULD},
5851 	{"xyld", BLOCK_XYLD},
5852 	{"ptld", BLOCK_PTLD},
5853 	{"ypld", BLOCK_YPLD},
5854 	{"prm", BLOCK_PRM},
5855 	{"pbf_pb1", BLOCK_PBF_PB1},
5856 	{"pbf_pb2", BLOCK_PBF_PB2},
5857 	{"rpb", BLOCK_RPB},
5858 	{"btb", BLOCK_BTB},
5859 	{"pbf", BLOCK_PBF},
5860 	{"rdif", BLOCK_RDIF},
5861 	{"tdif", BLOCK_TDIF},
5862 	{"cdu", BLOCK_CDU},
5863 	{"ccfc", BLOCK_CCFC},
5864 	{"tcfc", BLOCK_TCFC},
5865 	{"igu", BLOCK_IGU},
5866 	{"cau", BLOCK_CAU},
5867 	{"rgfs", BLOCK_RGFS},
5868 	{"rgsrc", BLOCK_RGSRC},
5869 	{"tgfs", BLOCK_TGFS},
5870 	{"tgsrc", BLOCK_TGSRC},
5871 	{"umac", BLOCK_UMAC},
5872 	{"xmac", BLOCK_XMAC},
5873 	{"dbg", BLOCK_DBG},
5874 	{"nig", BLOCK_NIG},
5875 	{"wol", BLOCK_WOL},
5876 	{"bmbn", BLOCK_BMBN},
5877 	{"ipc", BLOCK_IPC},
5878 	{"nwm", BLOCK_NWM},
5879 	{"nws", BLOCK_NWS},
5880 	{"ms", BLOCK_MS},
5881 	{"phy_pcie", BLOCK_PHY_PCIE},
5882 	{"led", BLOCK_LED},
5883 	{"avs_wrap", BLOCK_AVS_WRAP},
5884 	{"pxpreqbus", BLOCK_PXPREQBUS},
5885 	{"misc_aeu", BLOCK_MISC_AEU},
5886 	{"bar0_map", BLOCK_BAR0_MAP}
5887 };
5888 
5889 /* Status string array */
5890 static const char * const s_status_str[] = {
5891 	/* DBG_STATUS_OK */
5892 	"Operation completed successfully",
5893 
5894 	/* DBG_STATUS_APP_VERSION_NOT_SET */
5895 	"Debug application version wasn't set",
5896 
5897 	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5898 	"Unsupported debug application version",
5899 
5900 	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5901 	"The debug block wasn't reset since the last recording",
5902 
5903 	/* DBG_STATUS_INVALID_ARGS */
5904 	"Invalid arguments",
5905 
5906 	/* DBG_STATUS_OUTPUT_ALREADY_SET */
5907 	"The debug output was already set",
5908 
5909 	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5910 	"Invalid PCI buffer size",
5911 
5912 	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5913 	"PCI buffer allocation failed",
5914 
5915 	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5916 	"A PCI buffer wasn't allocated",
5917 
5918 	/* DBG_STATUS_TOO_MANY_INPUTS */
5919 	"Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
5920 
5921 	/* DBG_STATUS_INPUT_OVERLAP */
5922 	"Overlapping debug bus inputs",
5923 
5924 	/* DBG_STATUS_HW_ONLY_RECORDING */
5925 	"Cannot record Storm data since the entire recording cycle is used by HW",
5926 
5927 	/* DBG_STATUS_STORM_ALREADY_ENABLED */
5928 	"The Storm was already enabled",
5929 
5930 	/* DBG_STATUS_STORM_NOT_ENABLED */
5931 	"The specified Storm wasn't enabled",
5932 
5933 	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5934 	"The block was already enabled",
5935 
5936 	/* DBG_STATUS_BLOCK_NOT_ENABLED */
5937 	"The specified block wasn't enabled",
5938 
5939 	/* DBG_STATUS_NO_INPUT_ENABLED */
5940 	"No input was enabled for recording",
5941 
5942 	/* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5943 	"Filters and triggers are not allowed when recording in 64b units",
5944 
5945 	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
5946 	"The filter was already enabled",
5947 
5948 	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5949 	"The trigger was already enabled",
5950 
5951 	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
5952 	"The trigger wasn't enabled",
5953 
5954 	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
5955 	"A constraint can be added only after a filter was enabled or a trigger state was added",
5956 
5957 	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5958 	"Cannot add more than 3 trigger states",
5959 
5960 	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5961 	"Cannot add more than 4 constraints per filter or trigger state",
5962 
5963 	/* DBG_STATUS_RECORDING_NOT_STARTED */
5964 	"The recording wasn't started",
5965 
5966 	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
5967 	"A trigger was configured, but it didn't trigger",
5968 
5969 	/* DBG_STATUS_NO_DATA_RECORDED */
5970 	"No data was recorded",
5971 
5972 	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5973 	"Dump buffer is too small",
5974 
5975 	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5976 	"Dumped data is not aligned to chunks",
5977 
5978 	/* DBG_STATUS_UNKNOWN_CHIP */
5979 	"Unknown chip",
5980 
5981 	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5982 	"Failed allocating virtual memory",
5983 
5984 	/* DBG_STATUS_BLOCK_IN_RESET */
5985 	"The input block is in reset",
5986 
5987 	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5988 	"Invalid MCP trace signature found in NVRAM",
5989 
5990 	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5991 	"Invalid bundle ID found in NVRAM",
5992 
5993 	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5994 	"Failed getting NVRAM image",
5995 
5996 	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5997 	"NVRAM image is not dword-aligned",
5998 
5999 	/* DBG_STATUS_NVRAM_READ_FAILED */
6000 	"Failed reading from NVRAM",
6001 
6002 	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
6003 	"Idle check parsing failed",
6004 
6005 	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
6006 	"MCP Trace data is corrupt",
6007 
6008 	/* DBG_STATUS_MCP_TRACE_NO_META */
6009 	"Dump doesn't contain meta data - it must be provided in image file",
6010 
6011 	/* DBG_STATUS_MCP_COULD_NOT_HALT */
6012 	"Failed to halt MCP",
6013 
6014 	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
6015 	"Failed to resume MCP after halt",
6016 
6017 	/* DBG_STATUS_RESERVED2 */
6018 	"Reserved debug status - shouldn't be returned",
6019 
6020 	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
6021 	"Failed to empty SEMI sync FIFO",
6022 
6023 	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
6024 	"IGU FIFO data is corrupt",
6025 
6026 	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
6027 	"MCP failed to mask parities",
6028 
6029 	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
6030 	"FW Asserts parsing failed",
6031 
6032 	/* DBG_STATUS_REG_FIFO_BAD_DATA */
6033 	"GRC FIFO data is corrupt",
6034 
6035 	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
6036 	"Protection Override data is corrupt",
6037 
6038 	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
6039 	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
6040 
6041 	/* DBG_STATUS_FILTER_BUG */
6042 	"Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
6043 
6044 	/* DBG_STATUS_NON_MATCHING_LINES */
6045 	"Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
6046 
6047 	/* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
6048 	"The selected trigger dword offset wasn't enabled in the recorded HW block",
6049 
6050 	/* DBG_STATUS_DBG_BUS_IN_USE */
6051 	"The debug bus is in use"
6052 };
6053 
6054 /* Idle check severity names array */
6055 static const char * const s_idle_chk_severity_str[] = {
6056 	"Error",
6057 	"Error if no traffic",
6058 	"Warning"
6059 };
6060 
6061 /* MCP Trace level names array */
6062 static const char * const s_mcp_trace_level_str[] = {
6063 	"ERROR",
6064 	"TRACE",
6065 	"DEBUG"
6066 };
6067 
6068 /* Access type names array */
6069 static const char * const s_access_strs[] = {
6070 	"read",
6071 	"write"
6072 };
6073 
6074 /* Privilege type names array */
6075 static const char * const s_privilege_strs[] = {
6076 	"VF",
6077 	"PDA",
6078 	"HV",
6079 	"UA"
6080 };
6081 
6082 /* Protection type names array */
6083 static const char * const s_protection_strs[] = {
6084 	"(default)",
6085 	"(default)",
6086 	"(default)",
6087 	"(default)",
6088 	"override VF",
6089 	"override PDA",
6090 	"override HV",
6091 	"override UA"
6092 };
6093 
6094 /* Master type names array */
6095 static const char * const s_master_strs[] = {
6096 	"???",
6097 	"pxp",
6098 	"mcp",
6099 	"msdm",
6100 	"psdm",
6101 	"ysdm",
6102 	"usdm",
6103 	"tsdm",
6104 	"xsdm",
6105 	"dbu",
6106 	"dmae",
6107 	"???",
6108 	"???",
6109 	"???",
6110 	"???",
6111 	"???"
6112 };
6113 
6114 /* REG FIFO error messages array */
6115 static const char * const s_reg_fifo_error_strs[] = {
6116 	"grc timeout",
6117 	"address doesn't belong to any block",
6118 	"reserved address in block or write to read-only address",
6119 	"privilege/protection mismatch",
6120 	"path isolation error"
6121 };
6122 
6123 /* IGU FIFO sources array */
6124 static const char * const s_igu_fifo_source_strs[] = {
6125 	"TSTORM",
6126 	"MSTORM",
6127 	"USTORM",
6128 	"XSTORM",
6129 	"YSTORM",
6130 	"PSTORM",
6131 	"PCIE",
6132 	"NIG_QM_PBF",
6133 	"CAU",
6134 	"ATTN",
6135 	"GRC",
6136 };
6137 
6138 /* IGU FIFO error messages */
6139 static const char * const s_igu_fifo_error_strs[] = {
6140 	"no error",
6141 	"length error",
6142 	"function disabled",
6143 	"VF sent command to attention address",
6144 	"host sent prod update command",
6145 	"read of during interrupt register while in MIMD mode",
6146 	"access to PXP BAR reserved address",
6147 	"producer update command to attention index",
6148 	"unknown error",
6149 	"SB index not valid",
6150 	"SB relative index and FID not found",
6151 	"FID not match",
6152 	"command with error flag asserted (PCI error or CAU discard)",
6153 	"VF sent cleanup and RF cleanup is disabled",
6154 	"cleanup command on type bigger than 4"
6155 };
6156 
6157 /* IGU FIFO address data */
6158 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6159 	{0x0, 0x101, "MSI-X Memory", NULL,
6160 	 IGU_ADDR_TYPE_MSIX_MEM},
6161 	{0x102, 0x1ff, "reserved", NULL,
6162 	 IGU_ADDR_TYPE_RESERVED},
6163 	{0x200, 0x200, "Write PBA[0:63]", NULL,
6164 	 IGU_ADDR_TYPE_WRITE_PBA},
6165 	{0x201, 0x201, "Write PBA[64:127]", "reserved",
6166 	 IGU_ADDR_TYPE_WRITE_PBA},
6167 	{0x202, 0x202, "Write PBA[128]", "reserved",
6168 	 IGU_ADDR_TYPE_WRITE_PBA},
6169 	{0x203, 0x3ff, "reserved", NULL,
6170 	 IGU_ADDR_TYPE_RESERVED},
6171 	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6172 	 IGU_ADDR_TYPE_WRITE_INT_ACK},
6173 	{0x5f0, 0x5f0, "Attention bits update", NULL,
6174 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6175 	{0x5f1, 0x5f1, "Attention bits set", NULL,
6176 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6177 	{0x5f2, 0x5f2, "Attention bits clear", NULL,
6178 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6179 	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6180 	 IGU_ADDR_TYPE_READ_INT},
6181 	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6182 	 IGU_ADDR_TYPE_READ_INT},
6183 	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6184 	 IGU_ADDR_TYPE_READ_INT},
6185 	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6186 	 IGU_ADDR_TYPE_READ_INT},
6187 	{0x5f7, 0x5ff, "reserved", NULL,
6188 	 IGU_ADDR_TYPE_RESERVED},
6189 	{0x600, 0x7ff, "Producer update", NULL,
6190 	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6191 };
6192 
6193 /******************************** Variables **********************************/
6194 
6195 /* Temporary buffer, used for print size calculations */
6196 static char s_temp_buf[MAX_MSG_LEN];
6197 
6198 /**************************** Private Functions ******************************/
6199 
6200 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6201 {
6202 	return (a + b) % size;
6203 }
6204 
6205 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6206 {
6207 	return (size + a - b) % size;
6208 }
6209 
6210 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6211  * bytes) and returns them as a dword value. the specified buffer offset is
6212  * updated.
6213  */
6214 static u32 qed_read_from_cyclic_buf(void *buf,
6215 				    u32 *offset,
6216 				    u32 buf_size, u8 num_bytes_to_read)
6217 {
6218 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6219 	u32 val = 0;
6220 
6221 	val_ptr = (u8 *)&val;
6222 
6223 	/* Assume running on a LITTLE ENDIAN and the buffer is network order
6224 	 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
6225 	 */
6226 	for (i = 0; i < num_bytes_to_read; i++) {
6227 		val_ptr[i] = bytes_buf[*offset];
6228 		*offset = qed_cyclic_add(*offset, 1, buf_size);
6229 	}
6230 
6231 	return val;
6232 }
6233 
6234 /* Reads and returns the next byte from the specified buffer.
6235  * The specified buffer offset is updated.
6236  */
6237 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6238 {
6239 	return ((u8 *)buf)[(*offset)++];
6240 }
6241 
6242 /* Reads and returns the next dword from the specified buffer.
6243  * The specified buffer offset is updated.
6244  */
6245 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6246 {
6247 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6248 
6249 	*offset += 4;
6250 
6251 	return dword_val;
6252 }
6253 
6254 /* Reads the next string from the specified buffer, and copies it to the
6255  * specified pointer. The specified buffer offset is updated.
6256  */
6257 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6258 {
6259 	const char *source_str = &((const char *)buf)[*offset];
6260 
6261 	strncpy(dest, source_str, size);
6262 	dest[size - 1] = '\0';
6263 	*offset += size;
6264 }
6265 
6266 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6267  * If the specified buffer in NULL, a temporary buffer pointer is returned.
6268  */
6269 static char *qed_get_buf_ptr(void *buf, u32 offset)
6270 {
6271 	return buf ? (char *)buf + offset : s_temp_buf;
6272 }
6273 
6274 /* Reads a param from the specified buffer. Returns the number of dwords read.
6275  * If the returned str_param is NULL, the param is numeric and its value is
6276  * returned in num_param.
6277  * Otheriwise, the param is a string and its pointer is returned in str_param.
6278  */
6279 static u32 qed_read_param(u32 *dump_buf,
6280 			  const char **param_name,
6281 			  const char **param_str_val, u32 *param_num_val)
6282 {
6283 	char *char_buf = (char *)dump_buf;
6284 	size_t offset = 0;
6285 
6286 	/* Extract param name */
6287 	*param_name = char_buf;
6288 	offset += strlen(*param_name) + 1;
6289 
6290 	/* Check param type */
6291 	if (*(char_buf + offset++)) {
6292 		/* String param */
6293 		*param_str_val = char_buf + offset;
6294 		*param_num_val = 0;
6295 		offset += strlen(*param_str_val) + 1;
6296 		if (offset & 0x3)
6297 			offset += (4 - (offset & 0x3));
6298 	} else {
6299 		/* Numeric param */
6300 		*param_str_val = NULL;
6301 		if (offset & 0x3)
6302 			offset += (4 - (offset & 0x3));
6303 		*param_num_val = *(u32 *)(char_buf + offset);
6304 		offset += 4;
6305 	}
6306 
6307 	return (u32)offset / 4;
6308 }
6309 
6310 /* Reads a section header from the specified buffer.
6311  * Returns the number of dwords read.
6312  */
6313 static u32 qed_read_section_hdr(u32 *dump_buf,
6314 				const char **section_name,
6315 				u32 *num_section_params)
6316 {
6317 	const char *param_str_val;
6318 
6319 	return qed_read_param(dump_buf,
6320 			      section_name, &param_str_val, num_section_params);
6321 }
6322 
6323 /* Reads section params from the specified buffer and prints them to the results
6324  * buffer. Returns the number of dwords read.
6325  */
6326 static u32 qed_print_section_params(u32 *dump_buf,
6327 				    u32 num_section_params,
6328 				    char *results_buf, u32 *num_chars_printed)
6329 {
6330 	u32 i, dump_offset = 0, results_offset = 0;
6331 
6332 	for (i = 0; i < num_section_params; i++) {
6333 		const char *param_name, *param_str_val;
6334 		u32 param_num_val = 0;
6335 
6336 		dump_offset += qed_read_param(dump_buf + dump_offset,
6337 					      &param_name,
6338 					      &param_str_val, &param_num_val);
6339 
6340 		if (param_str_val)
6341 			results_offset +=
6342 				sprintf(qed_get_buf_ptr(results_buf,
6343 							results_offset),
6344 					"%s: %s\n", param_name, param_str_val);
6345 		else if (strcmp(param_name, "fw-timestamp"))
6346 			results_offset +=
6347 				sprintf(qed_get_buf_ptr(results_buf,
6348 							results_offset),
6349 					"%s: %d\n", param_name, param_num_val);
6350 	}
6351 
6352 	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6353 				  "\n");
6354 
6355 	*num_chars_printed = results_offset;
6356 
6357 	return dump_offset;
6358 }
6359 
6360 static struct dbg_tools_user_data *
6361 qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
6362 {
6363 	return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6364 }
6365 
6366 /* Parses the idle check rules and returns the number of characters printed.
6367  * In case of parsing error, returns 0.
6368  */
6369 static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
6370 					 u32 *dump_buf_end,
6371 					 u32 num_rules,
6372 					 bool print_fw_idle_chk,
6373 					 char *results_buf,
6374 					 u32 *num_errors, u32 *num_warnings)
6375 {
6376 	/* Offset in results_buf in bytes */
6377 	u32 results_offset = 0;
6378 
6379 	u32 rule_idx;
6380 	u16 i, j;
6381 
6382 	*num_errors = 0;
6383 	*num_warnings = 0;
6384 
6385 	/* Go over dumped results */
6386 	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6387 	     rule_idx++) {
6388 		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6389 		struct dbg_idle_chk_result_hdr *hdr;
6390 		const char *parsing_str, *lsi_msg;
6391 		u32 parsing_str_offset;
6392 		bool has_fw_msg;
6393 		u8 curr_reg_id;
6394 
6395 		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6396 		rule_parsing_data =
6397 			(const struct dbg_idle_chk_rule_parsing_data *)
6398 			&s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6399 			ptr[hdr->rule_id];
6400 		parsing_str_offset =
6401 			GET_FIELD(rule_parsing_data->data,
6402 				  DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6403 		has_fw_msg =
6404 			GET_FIELD(rule_parsing_data->data,
6405 				DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6406 		parsing_str =
6407 			&((const char *)
6408 			s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6409 			[parsing_str_offset];
6410 		lsi_msg = parsing_str;
6411 		curr_reg_id = 0;
6412 
6413 		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6414 			return 0;
6415 
6416 		/* Skip rule header */
6417 		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6418 
6419 		/* Update errors/warnings count */
6420 		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6421 		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6422 			(*num_errors)++;
6423 		else
6424 			(*num_warnings)++;
6425 
6426 		/* Print rule severity */
6427 		results_offset +=
6428 		    sprintf(qed_get_buf_ptr(results_buf,
6429 					    results_offset), "%s: ",
6430 			    s_idle_chk_severity_str[hdr->severity]);
6431 
6432 		/* Print rule message */
6433 		if (has_fw_msg)
6434 			parsing_str += strlen(parsing_str) + 1;
6435 		results_offset +=
6436 		    sprintf(qed_get_buf_ptr(results_buf,
6437 					    results_offset), "%s.",
6438 			    has_fw_msg &&
6439 			    print_fw_idle_chk ? parsing_str : lsi_msg);
6440 		parsing_str += strlen(parsing_str) + 1;
6441 
6442 		/* Print register values */
6443 		results_offset +=
6444 		    sprintf(qed_get_buf_ptr(results_buf,
6445 					    results_offset), " Registers:");
6446 		for (i = 0;
6447 		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6448 		     i++) {
6449 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6450 			bool is_mem;
6451 			u8 reg_id;
6452 
6453 			reg_hdr =
6454 				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6455 			is_mem = GET_FIELD(reg_hdr->data,
6456 					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6457 			reg_id = GET_FIELD(reg_hdr->data,
6458 					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6459 
6460 			/* Skip reg header */
6461 			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6462 
6463 			/* Skip register names until the required reg_id is
6464 			 * reached.
6465 			 */
6466 			for (; reg_id > curr_reg_id;
6467 			     curr_reg_id++,
6468 			     parsing_str += strlen(parsing_str) + 1);
6469 
6470 			results_offset +=
6471 			    sprintf(qed_get_buf_ptr(results_buf,
6472 						    results_offset), " %s",
6473 				    parsing_str);
6474 			if (i < hdr->num_dumped_cond_regs && is_mem)
6475 				results_offset +=
6476 				    sprintf(qed_get_buf_ptr(results_buf,
6477 							    results_offset),
6478 					    "[%d]", hdr->mem_entry_id +
6479 					    reg_hdr->start_entry);
6480 			results_offset +=
6481 			    sprintf(qed_get_buf_ptr(results_buf,
6482 						    results_offset), "=");
6483 			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6484 				results_offset +=
6485 				    sprintf(qed_get_buf_ptr(results_buf,
6486 							    results_offset),
6487 					    "0x%x", *dump_buf);
6488 				if (j < reg_hdr->size - 1)
6489 					results_offset +=
6490 					    sprintf(qed_get_buf_ptr
6491 						    (results_buf,
6492 						     results_offset), ",");
6493 			}
6494 		}
6495 
6496 		results_offset +=
6497 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6498 	}
6499 
6500 	/* Check if end of dump buffer was exceeded */
6501 	if (dump_buf > dump_buf_end)
6502 		return 0;
6503 
6504 	return results_offset;
6505 }
6506 
6507 /* Parses an idle check dump buffer.
6508  * If result_buf is not NULL, the idle check results are printed to it.
6509  * In any case, the required results buffer size is assigned to
6510  * parsed_results_bytes.
6511  * The parsing status is returned.
6512  */
6513 static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
6514 					       u32 num_dumped_dwords,
6515 					       char *results_buf,
6516 					       u32 *parsed_results_bytes,
6517 					       u32 *num_errors,
6518 					       u32 *num_warnings)
6519 {
6520 	const char *section_name, *param_name, *param_str_val;
6521 	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6522 	u32 num_section_params = 0, num_rules;
6523 
6524 	/* Offset in results_buf in bytes */
6525 	u32 results_offset = 0;
6526 
6527 	*parsed_results_bytes = 0;
6528 	*num_errors = 0;
6529 	*num_warnings = 0;
6530 
6531 	if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6532 	    !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6533 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6534 
6535 	/* Read global_params section */
6536 	dump_buf += qed_read_section_hdr(dump_buf,
6537 					 &section_name, &num_section_params);
6538 	if (strcmp(section_name, "global_params"))
6539 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6540 
6541 	/* Print global params */
6542 	dump_buf += qed_print_section_params(dump_buf,
6543 					     num_section_params,
6544 					     results_buf, &results_offset);
6545 
6546 	/* Read idle_chk section */
6547 	dump_buf += qed_read_section_hdr(dump_buf,
6548 					 &section_name, &num_section_params);
6549 	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6550 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6551 	dump_buf += qed_read_param(dump_buf,
6552 				   &param_name, &param_str_val, &num_rules);
6553 	if (strcmp(param_name, "num_rules"))
6554 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6555 
6556 	if (num_rules) {
6557 		u32 rules_print_size;
6558 
6559 		/* Print FW output */
6560 		results_offset +=
6561 		    sprintf(qed_get_buf_ptr(results_buf,
6562 					    results_offset),
6563 			    "FW_IDLE_CHECK:\n");
6564 		rules_print_size =
6565 			qed_parse_idle_chk_dump_rules(dump_buf,
6566 						      dump_buf_end,
6567 						      num_rules,
6568 						      true,
6569 						      results_buf ?
6570 						      results_buf +
6571 						      results_offset :
6572 						      NULL,
6573 						      num_errors,
6574 						      num_warnings);
6575 		results_offset += rules_print_size;
6576 		if (!rules_print_size)
6577 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6578 
6579 		/* Print LSI output */
6580 		results_offset +=
6581 		    sprintf(qed_get_buf_ptr(results_buf,
6582 					    results_offset),
6583 			    "\nLSI_IDLE_CHECK:\n");
6584 		rules_print_size =
6585 			qed_parse_idle_chk_dump_rules(dump_buf,
6586 						      dump_buf_end,
6587 						      num_rules,
6588 						      false,
6589 						      results_buf ?
6590 						      results_buf +
6591 						      results_offset :
6592 						      NULL,
6593 						      num_errors,
6594 						      num_warnings);
6595 		results_offset += rules_print_size;
6596 		if (!rules_print_size)
6597 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6598 	}
6599 
6600 	/* Print errors/warnings count */
6601 	if (*num_errors)
6602 		results_offset +=
6603 		    sprintf(qed_get_buf_ptr(results_buf,
6604 					    results_offset),
6605 			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6606 			    *num_errors, *num_warnings);
6607 	else if (*num_warnings)
6608 		results_offset +=
6609 		    sprintf(qed_get_buf_ptr(results_buf,
6610 					    results_offset),
6611 			    "\nIdle Check completed successfully (with %d warnings)\n",
6612 			    *num_warnings);
6613 	else
6614 		results_offset +=
6615 		    sprintf(qed_get_buf_ptr(results_buf,
6616 					    results_offset),
6617 			    "\nIdle Check completed successfully\n");
6618 
6619 	/* Add 1 for string NULL termination */
6620 	*parsed_results_bytes = results_offset + 1;
6621 
6622 	return DBG_STATUS_OK;
6623 }
6624 
6625 /* Allocates and fills MCP Trace meta data based on the specified meta data
6626  * dump buffer.
6627  * Returns debug status code.
6628  */
6629 static enum dbg_status
6630 qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6631 			      const u32 *meta_buf)
6632 {
6633 	struct dbg_tools_user_data *dev_user_data;
6634 	u32 offset = 0, signature, i;
6635 	struct mcp_trace_meta *meta;
6636 	u8 *meta_buf_bytes;
6637 
6638 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6639 	meta = &dev_user_data->mcp_trace_meta;
6640 	meta_buf_bytes = (u8 *)meta_buf;
6641 
6642 	/* Free the previous meta before loading a new one. */
6643 	if (meta->is_allocated)
6644 		qed_mcp_trace_free_meta_data(p_hwfn);
6645 
6646 	memset(meta, 0, sizeof(*meta));
6647 
6648 	/* Read first signature */
6649 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6650 	if (signature != NVM_MAGIC_VALUE)
6651 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6652 
6653 	/* Read no. of modules and allocate memory for their pointers */
6654 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6655 	meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6656 				GFP_KERNEL);
6657 	if (!meta->modules)
6658 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6659 
6660 	/* Allocate and read all module strings */
6661 	for (i = 0; i < meta->modules_num; i++) {
6662 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6663 
6664 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6665 		if (!(*(meta->modules + i))) {
6666 			/* Update number of modules to be released */
6667 			meta->modules_num = i ? i - 1 : 0;
6668 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6669 		}
6670 
6671 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6672 				      *(meta->modules + i));
6673 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6674 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6675 	}
6676 
6677 	/* Read second signature */
6678 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6679 	if (signature != NVM_MAGIC_VALUE)
6680 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6681 
6682 	/* Read number of formats and allocate memory for all formats */
6683 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6684 	meta->formats = kcalloc(meta->formats_num,
6685 				sizeof(struct mcp_trace_format),
6686 				GFP_KERNEL);
6687 	if (!meta->formats)
6688 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6689 
6690 	/* Allocate and read all strings */
6691 	for (i = 0; i < meta->formats_num; i++) {
6692 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6693 		u8 format_len;
6694 
6695 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6696 							   &offset);
6697 		format_len =
6698 		    (format_ptr->data &
6699 		     MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6700 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6701 		if (!format_ptr->format_str) {
6702 			/* Update number of modules to be released */
6703 			meta->formats_num = i ? i - 1 : 0;
6704 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6705 		}
6706 
6707 		qed_read_str_from_buf(meta_buf_bytes,
6708 				      &offset,
6709 				      format_len, format_ptr->format_str);
6710 	}
6711 
6712 	meta->is_allocated = true;
6713 	return DBG_STATUS_OK;
6714 }
6715 
6716 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6717  * are printed to it. The parsing status is returned.
6718  * Arguments:
6719  * trace_buf - MCP trace cyclic buffer
6720  * trace_buf_size - MCP trace cyclic buffer size in bytes
6721  * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6722  *               buffer.
6723  * data_size - size in bytes of data to parse.
6724  * parsed_buf - destination buffer for parsed data.
6725  * parsed_results_bytes - size of parsed data in bytes.
6726  */
6727 static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6728 					       u8 *trace_buf,
6729 					       u32 trace_buf_size,
6730 					       u32 data_offset,
6731 					       u32 data_size,
6732 					       char *parsed_buf,
6733 					       u32 *parsed_results_bytes)
6734 {
6735 	struct dbg_tools_user_data *dev_user_data;
6736 	struct mcp_trace_meta *meta;
6737 	u32 param_mask, param_shift;
6738 	enum dbg_status status;
6739 
6740 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6741 	meta = &dev_user_data->mcp_trace_meta;
6742 	*parsed_results_bytes = 0;
6743 
6744 	if (!meta->is_allocated)
6745 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6746 
6747 	status = DBG_STATUS_OK;
6748 
6749 	while (data_size) {
6750 		struct mcp_trace_format *format_ptr;
6751 		u8 format_level, format_module;
6752 		u32 params[3] = { 0, 0, 0 };
6753 		u32 header, format_idx, i;
6754 
6755 		if (data_size < MFW_TRACE_ENTRY_SIZE)
6756 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6757 
6758 		header = qed_read_from_cyclic_buf(trace_buf,
6759 						  &data_offset,
6760 						  trace_buf_size,
6761 						  MFW_TRACE_ENTRY_SIZE);
6762 		data_size -= MFW_TRACE_ENTRY_SIZE;
6763 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6764 
6765 		/* Skip message if its index doesn't exist in the meta data */
6766 		if (format_idx >= meta->formats_num) {
6767 			u8 format_size =
6768 				(u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
6769 				     MFW_TRACE_PRM_SIZE_SHIFT);
6770 
6771 			if (data_size < format_size)
6772 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6773 
6774 			data_offset = qed_cyclic_add(data_offset,
6775 						     format_size,
6776 						     trace_buf_size);
6777 			data_size -= format_size;
6778 			continue;
6779 		}
6780 
6781 		format_ptr = &meta->formats[format_idx];
6782 
6783 		for (i = 0,
6784 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
6785 		     param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6786 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6787 		     i++,
6788 		     param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6789 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6790 			/* Extract param size (0..3) */
6791 			u8 param_size = (u8)((format_ptr->data & param_mask) >>
6792 					     param_shift);
6793 
6794 			/* If the param size is zero, there are no other
6795 			 * parameters.
6796 			 */
6797 			if (!param_size)
6798 				break;
6799 
6800 			/* Size is encoded using 2 bits, where 3 is used to
6801 			 * encode 4.
6802 			 */
6803 			if (param_size == 3)
6804 				param_size = 4;
6805 
6806 			if (data_size < param_size)
6807 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6808 
6809 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6810 							     &data_offset,
6811 							     trace_buf_size,
6812 							     param_size);
6813 			data_size -= param_size;
6814 		}
6815 
6816 		format_level = (u8)((format_ptr->data &
6817 				     MCP_TRACE_FORMAT_LEVEL_MASK) >>
6818 				    MCP_TRACE_FORMAT_LEVEL_SHIFT);
6819 		format_module = (u8)((format_ptr->data &
6820 				      MCP_TRACE_FORMAT_MODULE_MASK) >>
6821 				     MCP_TRACE_FORMAT_MODULE_SHIFT);
6822 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6823 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6824 
6825 		/* Print current message to results buffer */
6826 		*parsed_results_bytes +=
6827 			sprintf(qed_get_buf_ptr(parsed_buf,
6828 						*parsed_results_bytes),
6829 				"%s %-8s: ",
6830 				s_mcp_trace_level_str[format_level],
6831 				meta->modules[format_module]);
6832 		*parsed_results_bytes +=
6833 		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6834 			    format_ptr->format_str,
6835 			    params[0], params[1], params[2]);
6836 	}
6837 
6838 	/* Add string NULL terminator */
6839 	(*parsed_results_bytes)++;
6840 
6841 	return status;
6842 }
6843 
6844 /* Parses an MCP Trace dump buffer.
6845  * If result_buf is not NULL, the MCP Trace results are printed to it.
6846  * In any case, the required results buffer size is assigned to
6847  * parsed_results_bytes.
6848  * The parsing status is returned.
6849  */
6850 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6851 						u32 *dump_buf,
6852 						char *results_buf,
6853 						u32 *parsed_results_bytes,
6854 						bool free_meta_data)
6855 {
6856 	const char *section_name, *param_name, *param_str_val;
6857 	u32 data_size, trace_data_dwords, trace_meta_dwords;
6858 	u32 offset, results_offset, results_buf_bytes;
6859 	u32 param_num_val, num_section_params;
6860 	struct mcp_trace *trace;
6861 	enum dbg_status status;
6862 	const u32 *meta_buf;
6863 	u8 *trace_buf;
6864 
6865 	*parsed_results_bytes = 0;
6866 
6867 	/* Read global_params section */
6868 	dump_buf += qed_read_section_hdr(dump_buf,
6869 					 &section_name, &num_section_params);
6870 	if (strcmp(section_name, "global_params"))
6871 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6872 
6873 	/* Print global params */
6874 	dump_buf += qed_print_section_params(dump_buf,
6875 					     num_section_params,
6876 					     results_buf, &results_offset);
6877 
6878 	/* Read trace_data section */
6879 	dump_buf += qed_read_section_hdr(dump_buf,
6880 					 &section_name, &num_section_params);
6881 	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6882 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6883 	dump_buf += qed_read_param(dump_buf,
6884 				   &param_name, &param_str_val, &param_num_val);
6885 	if (strcmp(param_name, "size"))
6886 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6887 	trace_data_dwords = param_num_val;
6888 
6889 	/* Prepare trace info */
6890 	trace = (struct mcp_trace *)dump_buf;
6891 	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
6892 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6893 
6894 	trace_buf = (u8 *)dump_buf + sizeof(*trace);
6895 	offset = trace->trace_oldest;
6896 	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
6897 	dump_buf += trace_data_dwords;
6898 
6899 	/* Read meta_data section */
6900 	dump_buf += qed_read_section_hdr(dump_buf,
6901 					 &section_name, &num_section_params);
6902 	if (strcmp(section_name, "mcp_trace_meta"))
6903 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6904 	dump_buf += qed_read_param(dump_buf,
6905 				   &param_name, &param_str_val, &param_num_val);
6906 	if (strcmp(param_name, "size"))
6907 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6908 	trace_meta_dwords = param_num_val;
6909 
6910 	/* Choose meta data buffer */
6911 	if (!trace_meta_dwords) {
6912 		/* Dump doesn't include meta data */
6913 		struct dbg_tools_user_data *dev_user_data =
6914 			qed_dbg_get_user_data(p_hwfn);
6915 
6916 		if (!dev_user_data->mcp_trace_user_meta_buf)
6917 			return DBG_STATUS_MCP_TRACE_NO_META;
6918 
6919 		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
6920 	} else {
6921 		/* Dump includes meta data */
6922 		meta_buf = dump_buf;
6923 	}
6924 
6925 	/* Allocate meta data memory */
6926 	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
6927 	if (status != DBG_STATUS_OK)
6928 		return status;
6929 
6930 	status = qed_parse_mcp_trace_buf(p_hwfn,
6931 					 trace_buf,
6932 					 trace->size,
6933 					 offset,
6934 					 data_size,
6935 					 results_buf ?
6936 					 results_buf + results_offset :
6937 					 NULL,
6938 					 &results_buf_bytes);
6939 	if (status != DBG_STATUS_OK)
6940 		return status;
6941 
6942 	if (free_meta_data)
6943 		qed_mcp_trace_free_meta_data(p_hwfn);
6944 
6945 	*parsed_results_bytes = results_offset + results_buf_bytes;
6946 
6947 	return DBG_STATUS_OK;
6948 }
6949 
6950 /* Parses a Reg FIFO dump buffer.
6951  * If result_buf is not NULL, the Reg FIFO results are printed to it.
6952  * In any case, the required results buffer size is assigned to
6953  * parsed_results_bytes.
6954  * The parsing status is returned.
6955  */
6956 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6957 					       char *results_buf,
6958 					       u32 *parsed_results_bytes)
6959 {
6960 	const char *section_name, *param_name, *param_str_val;
6961 	u32 param_num_val, num_section_params, num_elements;
6962 	struct reg_fifo_element *elements;
6963 	u8 i, j, err_val, vf_val;
6964 	u32 results_offset = 0;
6965 	char vf_str[4];
6966 
6967 	/* Read global_params section */
6968 	dump_buf += qed_read_section_hdr(dump_buf,
6969 					 &section_name, &num_section_params);
6970 	if (strcmp(section_name, "global_params"))
6971 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6972 
6973 	/* Print global params */
6974 	dump_buf += qed_print_section_params(dump_buf,
6975 					     num_section_params,
6976 					     results_buf, &results_offset);
6977 
6978 	/* Read reg_fifo_data section */
6979 	dump_buf += qed_read_section_hdr(dump_buf,
6980 					 &section_name, &num_section_params);
6981 	if (strcmp(section_name, "reg_fifo_data"))
6982 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6983 	dump_buf += qed_read_param(dump_buf,
6984 				   &param_name, &param_str_val, &param_num_val);
6985 	if (strcmp(param_name, "size"))
6986 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6987 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6988 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6989 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6990 	elements = (struct reg_fifo_element *)dump_buf;
6991 
6992 	/* Decode elements */
6993 	for (i = 0; i < num_elements; i++) {
6994 		bool err_printed = false;
6995 
6996 		/* Discover if element belongs to a VF or a PF */
6997 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6998 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6999 			sprintf(vf_str, "%s", "N/A");
7000 		else
7001 			sprintf(vf_str, "%d", vf_val);
7002 
7003 		/* Add parsed element to parsed buffer */
7004 		results_offset +=
7005 		    sprintf(qed_get_buf_ptr(results_buf,
7006 					    results_offset),
7007 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
7008 			    elements[i].data,
7009 			    (u32)GET_FIELD(elements[i].data,
7010 					   REG_FIFO_ELEMENT_ADDRESS) *
7011 			    REG_FIFO_ELEMENT_ADDR_FACTOR,
7012 			    s_access_strs[GET_FIELD(elements[i].data,
7013 						    REG_FIFO_ELEMENT_ACCESS)],
7014 			    (u32)GET_FIELD(elements[i].data,
7015 					   REG_FIFO_ELEMENT_PF),
7016 			    vf_str,
7017 			    (u32)GET_FIELD(elements[i].data,
7018 					   REG_FIFO_ELEMENT_PORT),
7019 			    s_privilege_strs[GET_FIELD(elements[i].data,
7020 						REG_FIFO_ELEMENT_PRIVILEGE)],
7021 			    s_protection_strs[GET_FIELD(elements[i].data,
7022 						REG_FIFO_ELEMENT_PROTECTION)],
7023 			    s_master_strs[GET_FIELD(elements[i].data,
7024 						REG_FIFO_ELEMENT_MASTER)]);
7025 
7026 		/* Print errors */
7027 		for (j = 0,
7028 		     err_val = GET_FIELD(elements[i].data,
7029 					 REG_FIFO_ELEMENT_ERROR);
7030 		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
7031 		     j++, err_val >>= 1) {
7032 			if (err_val & 0x1) {
7033 				if (err_printed)
7034 					results_offset +=
7035 					    sprintf(qed_get_buf_ptr
7036 						    (results_buf,
7037 						     results_offset), ", ");
7038 				results_offset +=
7039 				    sprintf(qed_get_buf_ptr
7040 					    (results_buf, results_offset), "%s",
7041 					    s_reg_fifo_error_strs[j]);
7042 				err_printed = true;
7043 			}
7044 		}
7045 
7046 		results_offset +=
7047 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
7048 	}
7049 
7050 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7051 						  results_offset),
7052 				  "fifo contained %d elements", num_elements);
7053 
7054 	/* Add 1 for string NULL termination */
7055 	*parsed_results_bytes = results_offset + 1;
7056 
7057 	return DBG_STATUS_OK;
7058 }
7059 
7060 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
7061 						  *element, char
7062 						  *results_buf,
7063 						  u32 *results_offset)
7064 {
7065 	const struct igu_fifo_addr_data *found_addr = NULL;
7066 	u8 source, err_type, i, is_cleanup;
7067 	char parsed_addr_data[32];
7068 	char parsed_wr_data[256];
7069 	u32 wr_data, prod_cons;
7070 	bool is_wr_cmd, is_pf;
7071 	u16 cmd_addr;
7072 	u64 dword12;
7073 
7074 	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
7075 	 * FIFO element.
7076 	 */
7077 	dword12 = ((u64)element->dword2 << 32) | element->dword1;
7078 	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
7079 	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
7080 	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
7081 	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
7082 	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
7083 
7084 	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
7085 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7086 	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
7087 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7088 
7089 	/* Find address data */
7090 	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
7091 		const struct igu_fifo_addr_data *curr_addr =
7092 			&s_igu_fifo_addr_data[i];
7093 
7094 		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
7095 		    curr_addr->end_addr)
7096 			found_addr = curr_addr;
7097 	}
7098 
7099 	if (!found_addr)
7100 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7101 
7102 	/* Prepare parsed address data */
7103 	switch (found_addr->type) {
7104 	case IGU_ADDR_TYPE_MSIX_MEM:
7105 		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
7106 		break;
7107 	case IGU_ADDR_TYPE_WRITE_INT_ACK:
7108 	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
7109 		sprintf(parsed_addr_data,
7110 			" SB = 0x%x", cmd_addr - found_addr->start_addr);
7111 		break;
7112 	default:
7113 		parsed_addr_data[0] = '\0';
7114 	}
7115 
7116 	if (!is_wr_cmd) {
7117 		parsed_wr_data[0] = '\0';
7118 		goto out;
7119 	}
7120 
7121 	/* Prepare parsed write data */
7122 	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
7123 	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
7124 	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
7125 
7126 	if (source == IGU_SRC_ATTN) {
7127 		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
7128 	} else {
7129 		if (is_cleanup) {
7130 			u8 cleanup_val, cleanup_type;
7131 
7132 			cleanup_val =
7133 				GET_FIELD(wr_data,
7134 					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
7135 			cleanup_type =
7136 			    GET_FIELD(wr_data,
7137 				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
7138 
7139 			sprintf(parsed_wr_data,
7140 				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
7141 				cleanup_val ? "set" : "clear",
7142 				cleanup_type);
7143 		} else {
7144 			u8 update_flag, en_dis_int_for_sb, segment;
7145 			u8 timer_mask;
7146 
7147 			update_flag = GET_FIELD(wr_data,
7148 						IGU_FIFO_WR_DATA_UPDATE_FLAG);
7149 			en_dis_int_for_sb =
7150 				GET_FIELD(wr_data,
7151 					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
7152 			segment = GET_FIELD(wr_data,
7153 					    IGU_FIFO_WR_DATA_SEGMENT);
7154 			timer_mask = GET_FIELD(wr_data,
7155 					       IGU_FIFO_WR_DATA_TIMER_MASK);
7156 
7157 			sprintf(parsed_wr_data,
7158 				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
7159 				prod_cons,
7160 				update_flag ? "update" : "nop",
7161 				en_dis_int_for_sb ?
7162 				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
7163 				"enable",
7164 				segment ? "attn" : "regular",
7165 				timer_mask);
7166 		}
7167 	}
7168 out:
7169 	/* Add parsed element to parsed buffer */
7170 	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
7171 						   *results_offset),
7172 				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7173 				   element->dword2, element->dword1,
7174 				   element->dword0,
7175 				   is_pf ? "pf" : "vf",
7176 				   GET_FIELD(element->dword0,
7177 					     IGU_FIFO_ELEMENT_DWORD0_FID),
7178 				   s_igu_fifo_source_strs[source],
7179 				   is_wr_cmd ? "wr" : "rd",
7180 				   cmd_addr,
7181 				   (!is_pf && found_addr->vf_desc)
7182 				   ? found_addr->vf_desc
7183 				   : found_addr->desc,
7184 				   parsed_addr_data,
7185 				   parsed_wr_data,
7186 				   s_igu_fifo_error_strs[err_type]);
7187 
7188 	return DBG_STATUS_OK;
7189 }
7190 
7191 /* Parses an IGU FIFO dump buffer.
7192  * If result_buf is not NULL, the IGU FIFO results are printed to it.
7193  * In any case, the required results buffer size is assigned to
7194  * parsed_results_bytes.
7195  * The parsing status is returned.
7196  */
7197 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7198 					       char *results_buf,
7199 					       u32 *parsed_results_bytes)
7200 {
7201 	const char *section_name, *param_name, *param_str_val;
7202 	u32 param_num_val, num_section_params, num_elements;
7203 	struct igu_fifo_element *elements;
7204 	enum dbg_status status;
7205 	u32 results_offset = 0;
7206 	u8 i;
7207 
7208 	/* Read global_params section */
7209 	dump_buf += qed_read_section_hdr(dump_buf,
7210 					 &section_name, &num_section_params);
7211 	if (strcmp(section_name, "global_params"))
7212 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7213 
7214 	/* Print global params */
7215 	dump_buf += qed_print_section_params(dump_buf,
7216 					     num_section_params,
7217 					     results_buf, &results_offset);
7218 
7219 	/* Read igu_fifo_data section */
7220 	dump_buf += qed_read_section_hdr(dump_buf,
7221 					 &section_name, &num_section_params);
7222 	if (strcmp(section_name, "igu_fifo_data"))
7223 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7224 	dump_buf += qed_read_param(dump_buf,
7225 				   &param_name, &param_str_val, &param_num_val);
7226 	if (strcmp(param_name, "size"))
7227 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7228 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7229 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7230 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7231 	elements = (struct igu_fifo_element *)dump_buf;
7232 
7233 	/* Decode elements */
7234 	for (i = 0; i < num_elements; i++) {
7235 		status = qed_parse_igu_fifo_element(&elements[i],
7236 						    results_buf,
7237 						    &results_offset);
7238 		if (status != DBG_STATUS_OK)
7239 			return status;
7240 	}
7241 
7242 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7243 						  results_offset),
7244 				  "fifo contained %d elements", num_elements);
7245 
7246 	/* Add 1 for string NULL termination */
7247 	*parsed_results_bytes = results_offset + 1;
7248 
7249 	return DBG_STATUS_OK;
7250 }
7251 
7252 static enum dbg_status
7253 qed_parse_protection_override_dump(u32 *dump_buf,
7254 				   char *results_buf,
7255 				   u32 *parsed_results_bytes)
7256 {
7257 	const char *section_name, *param_name, *param_str_val;
7258 	u32 param_num_val, num_section_params, num_elements;
7259 	struct protection_override_element *elements;
7260 	u32 results_offset = 0;
7261 	u8 i;
7262 
7263 	/* Read global_params section */
7264 	dump_buf += qed_read_section_hdr(dump_buf,
7265 					 &section_name, &num_section_params);
7266 	if (strcmp(section_name, "global_params"))
7267 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7268 
7269 	/* Print global params */
7270 	dump_buf += qed_print_section_params(dump_buf,
7271 					     num_section_params,
7272 					     results_buf, &results_offset);
7273 
7274 	/* Read protection_override_data section */
7275 	dump_buf += qed_read_section_hdr(dump_buf,
7276 					 &section_name, &num_section_params);
7277 	if (strcmp(section_name, "protection_override_data"))
7278 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7279 	dump_buf += qed_read_param(dump_buf,
7280 				   &param_name, &param_str_val, &param_num_val);
7281 	if (strcmp(param_name, "size"))
7282 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7283 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7284 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7285 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7286 	elements = (struct protection_override_element *)dump_buf;
7287 
7288 	/* Decode elements */
7289 	for (i = 0; i < num_elements; i++) {
7290 		u32 address = GET_FIELD(elements[i].data,
7291 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7292 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7293 
7294 		results_offset +=
7295 		    sprintf(qed_get_buf_ptr(results_buf,
7296 					    results_offset),
7297 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7298 			    i, address,
7299 			    (u32)GET_FIELD(elements[i].data,
7300 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7301 			    (u32)GET_FIELD(elements[i].data,
7302 				      PROTECTION_OVERRIDE_ELEMENT_READ),
7303 			    (u32)GET_FIELD(elements[i].data,
7304 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7305 			    s_protection_strs[GET_FIELD(elements[i].data,
7306 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7307 			    s_protection_strs[GET_FIELD(elements[i].data,
7308 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7309 	}
7310 
7311 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7312 						  results_offset),
7313 				  "protection override contained %d elements",
7314 				  num_elements);
7315 
7316 	/* Add 1 for string NULL termination */
7317 	*parsed_results_bytes = results_offset + 1;
7318 
7319 	return DBG_STATUS_OK;
7320 }
7321 
7322 /* Parses a FW Asserts dump buffer.
7323  * If result_buf is not NULL, the FW Asserts results are printed to it.
7324  * In any case, the required results buffer size is assigned to
7325  * parsed_results_bytes.
7326  * The parsing status is returned.
7327  */
7328 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7329 						 char *results_buf,
7330 						 u32 *parsed_results_bytes)
7331 {
7332 	u32 num_section_params, param_num_val, i, results_offset = 0;
7333 	const char *param_name, *param_str_val, *section_name;
7334 	bool last_section_found = false;
7335 
7336 	*parsed_results_bytes = 0;
7337 
7338 	/* Read global_params section */
7339 	dump_buf += qed_read_section_hdr(dump_buf,
7340 					 &section_name, &num_section_params);
7341 	if (strcmp(section_name, "global_params"))
7342 		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7343 
7344 	/* Print global params */
7345 	dump_buf += qed_print_section_params(dump_buf,
7346 					     num_section_params,
7347 					     results_buf, &results_offset);
7348 
7349 	while (!last_section_found) {
7350 		dump_buf += qed_read_section_hdr(dump_buf,
7351 						 &section_name,
7352 						 &num_section_params);
7353 		if (!strcmp(section_name, "fw_asserts")) {
7354 			/* Extract params */
7355 			const char *storm_letter = NULL;
7356 			u32 storm_dump_size = 0;
7357 
7358 			for (i = 0; i < num_section_params; i++) {
7359 				dump_buf += qed_read_param(dump_buf,
7360 							   &param_name,
7361 							   &param_str_val,
7362 							   &param_num_val);
7363 				if (!strcmp(param_name, "storm"))
7364 					storm_letter = param_str_val;
7365 				else if (!strcmp(param_name, "size"))
7366 					storm_dump_size = param_num_val;
7367 				else
7368 					return
7369 					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7370 			}
7371 
7372 			if (!storm_letter || !storm_dump_size)
7373 				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7374 
7375 			/* Print data */
7376 			results_offset +=
7377 			    sprintf(qed_get_buf_ptr(results_buf,
7378 						    results_offset),
7379 				    "\n%sSTORM_ASSERT: size=%d\n",
7380 				    storm_letter, storm_dump_size);
7381 			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7382 				results_offset +=
7383 				    sprintf(qed_get_buf_ptr(results_buf,
7384 							    results_offset),
7385 					    "%08x\n", *dump_buf);
7386 		} else if (!strcmp(section_name, "last")) {
7387 			last_section_found = true;
7388 		} else {
7389 			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7390 		}
7391 	}
7392 
7393 	/* Add 1 for string NULL termination */
7394 	*parsed_results_bytes = results_offset + 1;
7395 
7396 	return DBG_STATUS_OK;
7397 }
7398 
7399 /***************************** Public Functions *******************************/
7400 
7401 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7402 {
7403 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7404 	u8 buf_id;
7405 
7406 	/* Convert binary data to debug arrays */
7407 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7408 		s_user_dbg_arrays[buf_id].ptr =
7409 			(u32 *)(bin_ptr + buf_array[buf_id].offset);
7410 		s_user_dbg_arrays[buf_id].size_in_dwords =
7411 			BYTES_TO_DWORDS(buf_array[buf_id].length);
7412 	}
7413 
7414 	return DBG_STATUS_OK;
7415 }
7416 
7417 enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn)
7418 {
7419 	p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data),
7420 					GFP_KERNEL);
7421 	if (!p_hwfn->dbg_user_info)
7422 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7423 
7424 	return DBG_STATUS_OK;
7425 }
7426 
7427 const char *qed_dbg_get_status_str(enum dbg_status status)
7428 {
7429 	return (status <
7430 		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7431 }
7432 
7433 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7434 						  u32 *dump_buf,
7435 						  u32 num_dumped_dwords,
7436 						  u32 *results_buf_size)
7437 {
7438 	u32 num_errors, num_warnings;
7439 
7440 	return qed_parse_idle_chk_dump(dump_buf,
7441 				       num_dumped_dwords,
7442 				       NULL,
7443 				       results_buf_size,
7444 				       &num_errors, &num_warnings);
7445 }
7446 
7447 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7448 					   u32 *dump_buf,
7449 					   u32 num_dumped_dwords,
7450 					   char *results_buf,
7451 					   u32 *num_errors,
7452 					   u32 *num_warnings)
7453 {
7454 	u32 parsed_buf_size;
7455 
7456 	return qed_parse_idle_chk_dump(dump_buf,
7457 				       num_dumped_dwords,
7458 				       results_buf,
7459 				       &parsed_buf_size,
7460 				       num_errors, num_warnings);
7461 }
7462 
7463 void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7464 				     const u32 *meta_buf)
7465 {
7466 	struct dbg_tools_user_data *dev_user_data =
7467 		qed_dbg_get_user_data(p_hwfn);
7468 
7469 	dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7470 }
7471 
7472 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7473 						   u32 *dump_buf,
7474 						   u32 num_dumped_dwords,
7475 						   u32 *results_buf_size)
7476 {
7477 	return qed_parse_mcp_trace_dump(p_hwfn,
7478 					dump_buf, NULL, results_buf_size, true);
7479 }
7480 
7481 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7482 					    u32 *dump_buf,
7483 					    u32 num_dumped_dwords,
7484 					    char *results_buf)
7485 {
7486 	u32 parsed_buf_size;
7487 
7488 	return qed_parse_mcp_trace_dump(p_hwfn,
7489 					dump_buf,
7490 					results_buf, &parsed_buf_size, true);
7491 }
7492 
7493 enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7494 						 u32 *dump_buf,
7495 						 char *results_buf)
7496 {
7497 	u32 parsed_buf_size;
7498 
7499 	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7500 					&parsed_buf_size, false);
7501 }
7502 
7503 enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7504 					 u8 *dump_buf,
7505 					 u32 num_dumped_bytes,
7506 					 char *results_buf)
7507 {
7508 	u32 parsed_results_bytes;
7509 
7510 	return qed_parse_mcp_trace_buf(p_hwfn,
7511 				       dump_buf,
7512 				       num_dumped_bytes,
7513 				       0,
7514 				       num_dumped_bytes,
7515 				       results_buf, &parsed_results_bytes);
7516 }
7517 
7518 /* Frees the specified MCP Trace meta data */
7519 void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7520 {
7521 	struct dbg_tools_user_data *dev_user_data;
7522 	struct mcp_trace_meta *meta;
7523 	u32 i;
7524 
7525 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
7526 	meta = &dev_user_data->mcp_trace_meta;
7527 	if (!meta->is_allocated)
7528 		return;
7529 
7530 	/* Release modules */
7531 	if (meta->modules) {
7532 		for (i = 0; i < meta->modules_num; i++)
7533 			kfree(meta->modules[i]);
7534 		kfree(meta->modules);
7535 	}
7536 
7537 	/* Release formats */
7538 	if (meta->formats) {
7539 		for (i = 0; i < meta->formats_num; i++)
7540 			kfree(meta->formats[i].format_str);
7541 		kfree(meta->formats);
7542 	}
7543 
7544 	meta->is_allocated = false;
7545 }
7546 
7547 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7548 						  u32 *dump_buf,
7549 						  u32 num_dumped_dwords,
7550 						  u32 *results_buf_size)
7551 {
7552 	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7553 }
7554 
7555 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7556 					   u32 *dump_buf,
7557 					   u32 num_dumped_dwords,
7558 					   char *results_buf)
7559 {
7560 	u32 parsed_buf_size;
7561 
7562 	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7563 }
7564 
7565 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7566 						  u32 *dump_buf,
7567 						  u32 num_dumped_dwords,
7568 						  u32 *results_buf_size)
7569 {
7570 	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7571 }
7572 
7573 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7574 					   u32 *dump_buf,
7575 					   u32 num_dumped_dwords,
7576 					   char *results_buf)
7577 {
7578 	u32 parsed_buf_size;
7579 
7580 	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7581 }
7582 
7583 enum dbg_status
7584 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7585 					     u32 *dump_buf,
7586 					     u32 num_dumped_dwords,
7587 					     u32 *results_buf_size)
7588 {
7589 	return qed_parse_protection_override_dump(dump_buf,
7590 						  NULL, results_buf_size);
7591 }
7592 
7593 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7594 						      u32 *dump_buf,
7595 						      u32 num_dumped_dwords,
7596 						      char *results_buf)
7597 {
7598 	u32 parsed_buf_size;
7599 
7600 	return qed_parse_protection_override_dump(dump_buf,
7601 						  results_buf,
7602 						  &parsed_buf_size);
7603 }
7604 
7605 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7606 						    u32 *dump_buf,
7607 						    u32 num_dumped_dwords,
7608 						    u32 *results_buf_size)
7609 {
7610 	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7611 }
7612 
7613 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7614 					     u32 *dump_buf,
7615 					     u32 num_dumped_dwords,
7616 					     char *results_buf)
7617 {
7618 	u32 parsed_buf_size;
7619 
7620 	return qed_parse_fw_asserts_dump(dump_buf,
7621 					 results_buf, &parsed_buf_size);
7622 }
7623 
7624 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7625 				   struct dbg_attn_block_result *results)
7626 {
7627 	struct user_dbg_array *block_attn, *pstrings;
7628 	const u32 *block_attn_name_offsets;
7629 	enum dbg_attn_type attn_type;
7630 	const char *block_name;
7631 	u8 num_regs, i, j;
7632 
7633 	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7634 	attn_type = (enum dbg_attn_type)
7635 		    GET_FIELD(results->data,
7636 			      DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7637 	block_name = s_block_info_arr[results->block_id].name;
7638 
7639 	if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7640 	    !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7641 	    !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7642 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
7643 
7644 	block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7645 	block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7646 
7647 	/* Go over registers with a non-zero attention status */
7648 	for (i = 0; i < num_regs; i++) {
7649 		struct dbg_attn_bit_mapping *bit_mapping;
7650 		struct dbg_attn_reg_result *reg_result;
7651 		u8 num_reg_attn, bit_idx = 0;
7652 
7653 		reg_result = &results->reg_results[i];
7654 		num_reg_attn = GET_FIELD(reg_result->data,
7655 					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7656 		block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7657 		bit_mapping = &((struct dbg_attn_bit_mapping *)
7658 				block_attn->ptr)[reg_result->block_attn_offset];
7659 
7660 		pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7661 
7662 		/* Go over attention status bits */
7663 		for (j = 0; j < num_reg_attn; j++) {
7664 			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7665 						     DBG_ATTN_BIT_MAPPING_VAL);
7666 			const char *attn_name, *attn_type_str, *masked_str;
7667 			u32 attn_name_offset, sts_addr;
7668 
7669 			/* Check if bit mask should be advanced (due to unused
7670 			 * bits).
7671 			 */
7672 			if (GET_FIELD(bit_mapping[j].data,
7673 				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7674 				bit_idx += (u8)attn_idx_val;
7675 				continue;
7676 			}
7677 
7678 			/* Check current bit index */
7679 			if (!(reg_result->sts_val & BIT(bit_idx))) {
7680 				bit_idx++;
7681 				continue;
7682 			}
7683 
7684 			/* Find attention name */
7685 			attn_name_offset =
7686 				block_attn_name_offsets[attn_idx_val];
7687 			attn_name = &((const char *)
7688 				      pstrings->ptr)[attn_name_offset];
7689 			attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7690 					"Interrupt" : "Parity";
7691 			masked_str = reg_result->mask_val & BIT(bit_idx) ?
7692 				     " [masked]" : "";
7693 			sts_addr = GET_FIELD(reg_result->data,
7694 					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
7695 			DP_NOTICE(p_hwfn,
7696 				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7697 				  block_name, attn_type_str, attn_name,
7698 				  sts_addr, bit_idx, masked_str);
7699 
7700 			bit_idx++;
7701 		}
7702 	}
7703 
7704 	return DBG_STATUS_OK;
7705 }
7706 
7707 /* Wrapper for unifying the idle_chk and mcp_trace api */
7708 static enum dbg_status
7709 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7710 				   u32 *dump_buf,
7711 				   u32 num_dumped_dwords,
7712 				   char *results_buf)
7713 {
7714 	u32 num_errors, num_warnnings;
7715 
7716 	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7717 					  results_buf, &num_errors,
7718 					  &num_warnnings);
7719 }
7720 
7721 /* Feature meta data lookup table */
7722 static struct {
7723 	char *name;
7724 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7725 				    struct qed_ptt *p_ptt, u32 *size);
7726 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7727 					struct qed_ptt *p_ptt, u32 *dump_buf,
7728 					u32 buf_size, u32 *dumped_dwords);
7729 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7730 					 u32 *dump_buf, u32 num_dumped_dwords,
7731 					 char *results_buf);
7732 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7733 					    u32 *dump_buf,
7734 					    u32 num_dumped_dwords,
7735 					    u32 *results_buf_size);
7736 } qed_features_lookup[] = {
7737 	{
7738 	"grc", qed_dbg_grc_get_dump_buf_size,
7739 		    qed_dbg_grc_dump, NULL, NULL}, {
7740 	"idle_chk",
7741 		    qed_dbg_idle_chk_get_dump_buf_size,
7742 		    qed_dbg_idle_chk_dump,
7743 		    qed_print_idle_chk_results_wrapper,
7744 		    qed_get_idle_chk_results_buf_size}, {
7745 	"mcp_trace",
7746 		    qed_dbg_mcp_trace_get_dump_buf_size,
7747 		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7748 		    qed_get_mcp_trace_results_buf_size}, {
7749 	"reg_fifo",
7750 		    qed_dbg_reg_fifo_get_dump_buf_size,
7751 		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7752 		    qed_get_reg_fifo_results_buf_size}, {
7753 	"igu_fifo",
7754 		    qed_dbg_igu_fifo_get_dump_buf_size,
7755 		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7756 		    qed_get_igu_fifo_results_buf_size}, {
7757 	"protection_override",
7758 		    qed_dbg_protection_override_get_dump_buf_size,
7759 		    qed_dbg_protection_override_dump,
7760 		    qed_print_protection_override_results,
7761 		    qed_get_protection_override_results_buf_size}, {
7762 	"fw_asserts",
7763 		    qed_dbg_fw_asserts_get_dump_buf_size,
7764 		    qed_dbg_fw_asserts_dump,
7765 		    qed_print_fw_asserts_results,
7766 		    qed_get_fw_asserts_results_buf_size},};
7767 
7768 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7769 {
7770 	u32 i, precision = 80;
7771 
7772 	if (!p_text_buf)
7773 		return;
7774 
7775 	pr_notice("\n%.*s", precision, p_text_buf);
7776 	for (i = precision; i < text_size; i += precision)
7777 		pr_cont("%.*s", precision, p_text_buf + i);
7778 	pr_cont("\n");
7779 }
7780 
7781 #define QED_RESULTS_BUF_MIN_SIZE 16
7782 /* Generic function for decoding debug feature info */
7783 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7784 				      enum qed_dbg_features feature_idx)
7785 {
7786 	struct qed_dbg_feature *feature =
7787 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7788 	u32 text_size_bytes, null_char_pos, i;
7789 	enum dbg_status rc;
7790 	char *text_buf;
7791 
7792 	/* Check if feature supports formatting capability */
7793 	if (!qed_features_lookup[feature_idx].results_buf_size)
7794 		return DBG_STATUS_OK;
7795 
7796 	/* Obtain size of formatted output */
7797 	rc = qed_features_lookup[feature_idx].
7798 		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7799 				 feature->dumped_dwords, &text_size_bytes);
7800 	if (rc != DBG_STATUS_OK)
7801 		return rc;
7802 
7803 	/* Make sure that the allocated size is a multiple of dword (4 bytes) */
7804 	null_char_pos = text_size_bytes - 1;
7805 	text_size_bytes = (text_size_bytes + 3) & ~0x3;
7806 
7807 	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7808 		DP_NOTICE(p_hwfn->cdev,
7809 			  "formatted size of feature was too small %d. Aborting\n",
7810 			  text_size_bytes);
7811 		return DBG_STATUS_INVALID_ARGS;
7812 	}
7813 
7814 	/* Allocate temp text buf */
7815 	text_buf = vzalloc(text_size_bytes);
7816 	if (!text_buf)
7817 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7818 
7819 	/* Decode feature opcodes to string on temp buf */
7820 	rc = qed_features_lookup[feature_idx].
7821 		print_results(p_hwfn, (u32 *)feature->dump_buf,
7822 			      feature->dumped_dwords, text_buf);
7823 	if (rc != DBG_STATUS_OK) {
7824 		vfree(text_buf);
7825 		return rc;
7826 	}
7827 
7828 	/* Replace the original null character with a '\n' character.
7829 	 * The bytes that were added as a result of the dword alignment are also
7830 	 * padded with '\n' characters.
7831 	 */
7832 	for (i = null_char_pos; i < text_size_bytes; i++)
7833 		text_buf[i] = '\n';
7834 
7835 	/* Dump printable feature to log */
7836 	if (p_hwfn->cdev->dbg_params.print_data)
7837 		qed_dbg_print_feature(text_buf, text_size_bytes);
7838 
7839 	/* Free the old dump_buf and point the dump_buf to the newly allocagted
7840 	 * and formatted text buffer.
7841 	 */
7842 	vfree(feature->dump_buf);
7843 	feature->dump_buf = text_buf;
7844 	feature->buf_size = text_size_bytes;
7845 	feature->dumped_dwords = text_size_bytes / 4;
7846 	return rc;
7847 }
7848 
7849 /* Generic function for performing the dump of a debug feature. */
7850 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7851 				    struct qed_ptt *p_ptt,
7852 				    enum qed_dbg_features feature_idx)
7853 {
7854 	struct qed_dbg_feature *feature =
7855 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7856 	u32 buf_size_dwords;
7857 	enum dbg_status rc;
7858 
7859 	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7860 		  qed_features_lookup[feature_idx].name);
7861 
7862 	/* Dump_buf was already allocated need to free (this can happen if dump
7863 	 * was called but file was never read).
7864 	 * We can't use the buffer as is since size may have changed.
7865 	 */
7866 	if (feature->dump_buf) {
7867 		vfree(feature->dump_buf);
7868 		feature->dump_buf = NULL;
7869 	}
7870 
7871 	/* Get buffer size from hsi, allocate accordingly, and perform the
7872 	 * dump.
7873 	 */
7874 	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7875 						       &buf_size_dwords);
7876 	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7877 		return rc;
7878 	feature->buf_size = buf_size_dwords * sizeof(u32);
7879 	feature->dump_buf = vmalloc(feature->buf_size);
7880 	if (!feature->dump_buf)
7881 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7882 
7883 	rc = qed_features_lookup[feature_idx].
7884 		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7885 			     feature->buf_size / sizeof(u32),
7886 			     &feature->dumped_dwords);
7887 
7888 	/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7889 	 * In this case the buffer holds valid binary data, but we wont able
7890 	 * to parse it (since parsing relies on data in NVRAM which is only
7891 	 * accessible when MFW is responsive). skip the formatting but return
7892 	 * success so that binary data is provided.
7893 	 */
7894 	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7895 		return DBG_STATUS_OK;
7896 
7897 	if (rc != DBG_STATUS_OK)
7898 		return rc;
7899 
7900 	/* Format output */
7901 	rc = format_feature(p_hwfn, feature_idx);
7902 	return rc;
7903 }
7904 
7905 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7906 {
7907 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7908 }
7909 
7910 int qed_dbg_grc_size(struct qed_dev *cdev)
7911 {
7912 	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7913 }
7914 
7915 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7916 {
7917 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7918 			       num_dumped_bytes);
7919 }
7920 
7921 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7922 {
7923 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7924 }
7925 
7926 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7927 {
7928 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7929 			       num_dumped_bytes);
7930 }
7931 
7932 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7933 {
7934 	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7935 }
7936 
7937 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7938 {
7939 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7940 			       num_dumped_bytes);
7941 }
7942 
7943 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7944 {
7945 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7946 }
7947 
7948 static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
7949 				    enum qed_nvm_images image_id, u32 *length)
7950 {
7951 	struct qed_nvm_image_att image_att;
7952 	int rc;
7953 
7954 	*length = 0;
7955 	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
7956 	if (rc)
7957 		return rc;
7958 
7959 	*length = image_att.length;
7960 
7961 	return rc;
7962 }
7963 
7964 static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
7965 			     u32 *num_dumped_bytes,
7966 			     enum qed_nvm_images image_id)
7967 {
7968 	struct qed_hwfn *p_hwfn =
7969 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7970 	u32 len_rounded, i;
7971 	__be32 val;
7972 	int rc;
7973 
7974 	*num_dumped_bytes = 0;
7975 	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
7976 	if (rc)
7977 		return rc;
7978 
7979 	DP_NOTICE(p_hwfn->cdev,
7980 		  "Collecting a debug feature [\"nvram image %d\"]\n",
7981 		  image_id);
7982 
7983 	len_rounded = roundup(len_rounded, sizeof(u32));
7984 	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
7985 	if (rc)
7986 		return rc;
7987 
7988 	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
7989 	if (image_id != QED_NVM_IMAGE_NVM_META)
7990 		for (i = 0; i < len_rounded; i += 4) {
7991 			val = cpu_to_be32(*(u32 *)(buffer + i));
7992 			*(u32 *)(buffer + i) = val;
7993 		}
7994 
7995 	*num_dumped_bytes = len_rounded;
7996 
7997 	return rc;
7998 }
7999 
8000 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
8001 				u32 *num_dumped_bytes)
8002 {
8003 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
8004 			       num_dumped_bytes);
8005 }
8006 
8007 int qed_dbg_protection_override_size(struct qed_dev *cdev)
8008 {
8009 	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
8010 }
8011 
8012 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
8013 		       u32 *num_dumped_bytes)
8014 {
8015 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
8016 			       num_dumped_bytes);
8017 }
8018 
8019 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
8020 {
8021 	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
8022 }
8023 
8024 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
8025 		      u32 *num_dumped_bytes)
8026 {
8027 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
8028 			       num_dumped_bytes);
8029 }
8030 
8031 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
8032 {
8033 	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
8034 }
8035 
8036 /* Defines the amount of bytes allocated for recording the length of debugfs
8037  * feature buffer.
8038  */
8039 #define REGDUMP_HEADER_SIZE			sizeof(u32)
8040 #define REGDUMP_HEADER_FEATURE_SHIFT		24
8041 #define REGDUMP_HEADER_ENGINE_SHIFT		31
8042 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
8043 enum debug_print_features {
8044 	OLD_MODE = 0,
8045 	IDLE_CHK = 1,
8046 	GRC_DUMP = 2,
8047 	MCP_TRACE = 3,
8048 	REG_FIFO = 4,
8049 	PROTECTION_OVERRIDE = 5,
8050 	IGU_FIFO = 6,
8051 	PHY = 7,
8052 	FW_ASSERTS = 8,
8053 	NVM_CFG1 = 9,
8054 	DEFAULT_CFG = 10,
8055 	NVM_META = 11,
8056 };
8057 
8058 static u32 qed_calc_regdump_header(enum debug_print_features feature,
8059 				   int engine, u32 feature_size, u8 omit_engine)
8060 {
8061 	/* Insert the engine, feature and mode inside the header and combine it
8062 	 * with feature size.
8063 	 */
8064 	return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
8065 	       (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
8066 	       (engine << REGDUMP_HEADER_ENGINE_SHIFT);
8067 }
8068 
8069 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
8070 {
8071 	u8 cur_engine, omit_engine = 0, org_engine;
8072 	struct qed_hwfn *p_hwfn =
8073 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8074 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
8075 	int grc_params[MAX_DBG_GRC_PARAMS], i;
8076 	u32 offset = 0, feature_size;
8077 	int rc;
8078 
8079 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
8080 		grc_params[i] = dev_data->grc.param_val[i];
8081 
8082 	if (cdev->num_hwfns == 1)
8083 		omit_engine = 1;
8084 
8085 	org_engine = qed_get_debug_engine(cdev);
8086 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8087 		/* Collect idle_chks and grcDump for each hw function */
8088 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8089 			   "obtaining idle_chk and grcdump for current engine\n");
8090 		qed_set_debug_engine(cdev, cur_engine);
8091 
8092 		/* First idle_chk */
8093 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8094 				      REGDUMP_HEADER_SIZE, &feature_size);
8095 		if (!rc) {
8096 			*(u32 *)((u8 *)buffer + offset) =
8097 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
8098 						    feature_size, omit_engine);
8099 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8100 		} else {
8101 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8102 		}
8103 
8104 		/* Second idle_chk */
8105 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8106 				      REGDUMP_HEADER_SIZE, &feature_size);
8107 		if (!rc) {
8108 			*(u32 *)((u8 *)buffer + offset) =
8109 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
8110 						    feature_size, omit_engine);
8111 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8112 		} else {
8113 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8114 		}
8115 
8116 		/* reg_fifo dump */
8117 		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
8118 				      REGDUMP_HEADER_SIZE, &feature_size);
8119 		if (!rc) {
8120 			*(u32 *)((u8 *)buffer + offset) =
8121 			    qed_calc_regdump_header(REG_FIFO, cur_engine,
8122 						    feature_size, omit_engine);
8123 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8124 		} else {
8125 			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
8126 		}
8127 
8128 		/* igu_fifo dump */
8129 		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
8130 				      REGDUMP_HEADER_SIZE, &feature_size);
8131 		if (!rc) {
8132 			*(u32 *)((u8 *)buffer + offset) =
8133 			    qed_calc_regdump_header(IGU_FIFO, cur_engine,
8134 						    feature_size, omit_engine);
8135 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8136 		} else {
8137 			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
8138 		}
8139 
8140 		/* protection_override dump */
8141 		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
8142 						 REGDUMP_HEADER_SIZE,
8143 						 &feature_size);
8144 		if (!rc) {
8145 			*(u32 *)((u8 *)buffer + offset) =
8146 			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
8147 						    cur_engine,
8148 						    feature_size, omit_engine);
8149 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8150 		} else {
8151 			DP_ERR(cdev,
8152 			       "qed_dbg_protection_override failed. rc = %d\n",
8153 			       rc);
8154 		}
8155 
8156 		/* fw_asserts dump */
8157 		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
8158 					REGDUMP_HEADER_SIZE, &feature_size);
8159 		if (!rc) {
8160 			*(u32 *)((u8 *)buffer + offset) =
8161 			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
8162 						    feature_size, omit_engine);
8163 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8164 		} else {
8165 			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
8166 			       rc);
8167 		}
8168 
8169 		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
8170 			dev_data->grc.param_val[i] = grc_params[i];
8171 
8172 		/* GRC dump - must be last because when mcp stuck it will
8173 		 * clutter idle_chk, reg_fifo, ...
8174 		 */
8175 		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
8176 				 REGDUMP_HEADER_SIZE, &feature_size);
8177 		if (!rc) {
8178 			*(u32 *)((u8 *)buffer + offset) =
8179 			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
8180 						    feature_size, omit_engine);
8181 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8182 		} else {
8183 			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
8184 		}
8185 	}
8186 
8187 	qed_set_debug_engine(cdev, org_engine);
8188 	/* mcp_trace */
8189 	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
8190 			       REGDUMP_HEADER_SIZE, &feature_size);
8191 	if (!rc) {
8192 		*(u32 *)((u8 *)buffer + offset) =
8193 		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
8194 					    feature_size, omit_engine);
8195 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8196 	} else {
8197 		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
8198 	}
8199 
8200 	/* nvm cfg1 */
8201 	rc = qed_dbg_nvm_image(cdev,
8202 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8203 			       &feature_size, QED_NVM_IMAGE_NVM_CFG1);
8204 	if (!rc) {
8205 		*(u32 *)((u8 *)buffer + offset) =
8206 		    qed_calc_regdump_header(NVM_CFG1, cur_engine,
8207 					    feature_size, omit_engine);
8208 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8209 	} else if (rc != -ENOENT) {
8210 		DP_ERR(cdev,
8211 		       "qed_dbg_nvm_image failed for image  %d (%s), rc = %d\n",
8212 		       QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
8213 	}
8214 
8215 	/* nvm default */
8216 	rc = qed_dbg_nvm_image(cdev,
8217 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8218 			       &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
8219 	if (!rc) {
8220 		*(u32 *)((u8 *)buffer + offset) =
8221 		    qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
8222 					    feature_size, omit_engine);
8223 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8224 	} else if (rc != -ENOENT) {
8225 		DP_ERR(cdev,
8226 		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8227 		       QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
8228 		       rc);
8229 	}
8230 
8231 	/* nvm meta */
8232 	rc = qed_dbg_nvm_image(cdev,
8233 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8234 			       &feature_size, QED_NVM_IMAGE_NVM_META);
8235 	if (!rc) {
8236 		*(u32 *)((u8 *)buffer + offset) =
8237 		    qed_calc_regdump_header(NVM_META, cur_engine,
8238 					    feature_size, omit_engine);
8239 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8240 	} else if (rc != -ENOENT) {
8241 		DP_ERR(cdev,
8242 		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8243 		       QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
8244 	}
8245 
8246 	return 0;
8247 }
8248 
8249 int qed_dbg_all_data_size(struct qed_dev *cdev)
8250 {
8251 	struct qed_hwfn *p_hwfn =
8252 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8253 	u32 regs_len = 0, image_len = 0;
8254 	u8 cur_engine, org_engine;
8255 
8256 	org_engine = qed_get_debug_engine(cdev);
8257 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8258 		/* Engine specific */
8259 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8260 			   "calculating idle_chk and grcdump register length for current engine\n");
8261 		qed_set_debug_engine(cdev, cur_engine);
8262 		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8263 			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8264 			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8265 			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8266 			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8267 			    REGDUMP_HEADER_SIZE +
8268 			    qed_dbg_protection_override_size(cdev) +
8269 			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8270 	}
8271 
8272 	qed_set_debug_engine(cdev, org_engine);
8273 
8274 	/* Engine common */
8275 	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
8276 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8277 	if (image_len)
8278 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8279 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8280 	if (image_len)
8281 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8282 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8283 	if (image_len)
8284 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8285 
8286 	return regs_len;
8287 }
8288 
8289 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8290 		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
8291 {
8292 	struct qed_hwfn *p_hwfn =
8293 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8294 	struct qed_dbg_feature *qed_feature =
8295 		&cdev->dbg_params.features[feature];
8296 	enum dbg_status dbg_rc;
8297 	struct qed_ptt *p_ptt;
8298 	int rc = 0;
8299 
8300 	/* Acquire ptt */
8301 	p_ptt = qed_ptt_acquire(p_hwfn);
8302 	if (!p_ptt)
8303 		return -EINVAL;
8304 
8305 	/* Get dump */
8306 	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8307 	if (dbg_rc != DBG_STATUS_OK) {
8308 		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8309 			   qed_dbg_get_status_str(dbg_rc));
8310 		*num_dumped_bytes = 0;
8311 		rc = -EINVAL;
8312 		goto out;
8313 	}
8314 
8315 	DP_VERBOSE(cdev, QED_MSG_DEBUG,
8316 		   "copying debugfs feature to external buffer\n");
8317 	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8318 	*num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
8319 			    4;
8320 
8321 out:
8322 	qed_ptt_release(p_hwfn, p_ptt);
8323 	return rc;
8324 }
8325 
8326 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8327 {
8328 	struct qed_hwfn *p_hwfn =
8329 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8330 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8331 	struct qed_dbg_feature *qed_feature =
8332 		&cdev->dbg_params.features[feature];
8333 	u32 buf_size_dwords;
8334 	enum dbg_status rc;
8335 
8336 	if (!p_ptt)
8337 		return -EINVAL;
8338 
8339 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8340 						   &buf_size_dwords);
8341 	if (rc != DBG_STATUS_OK)
8342 		buf_size_dwords = 0;
8343 
8344 	qed_ptt_release(p_hwfn, p_ptt);
8345 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8346 	return qed_feature->buf_size;
8347 }
8348 
8349 u8 qed_get_debug_engine(struct qed_dev *cdev)
8350 {
8351 	return cdev->dbg_params.engine_for_debug;
8352 }
8353 
8354 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8355 {
8356 	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8357 		   engine_number);
8358 	cdev->dbg_params.engine_for_debug = engine_number;
8359 }
8360 
8361 void qed_dbg_pf_init(struct qed_dev *cdev)
8362 {
8363 	const u8 *dbg_values;
8364 
8365 	/* Debug values are after init values.
8366 	 * The offset is the first dword of the file.
8367 	 */
8368 	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8369 	qed_dbg_set_bin_ptr((u8 *)dbg_values);
8370 	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
8371 }
8372 
8373 void qed_dbg_pf_exit(struct qed_dev *cdev)
8374 {
8375 	struct qed_dbg_feature *feature = NULL;
8376 	enum qed_dbg_features feature_idx;
8377 
8378 	/* Debug features' buffers may be allocated if debug feature was used
8379 	 * but dump wasn't called.
8380 	 */
8381 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8382 		feature = &cdev->dbg_params.features[feature_idx];
8383 		if (feature->dump_buf) {
8384 			vfree(feature->dump_buf);
8385 			feature->dump_buf = NULL;
8386 		}
8387 	}
8388 }
8389