1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File : ecore_dbg_fw_funcs.c
30  */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "bcm_osal.h"
35 #include "ecore.h"
36 #include "ecore_hw.h"
37 #include "ecore_mcp.h"
38 #include "spad_layout.h"
39 #include "nvm_map.h"
40 #include "reg_addr.h"
41 #include "ecore_hsi_common.h"
42 #include "ecore_hsi_debug_tools.h"
43 #include "mcp_public.h"
44 #include "nvm_map.h"
45 #ifndef USE_DBG_BIN_FILE
46 #include "ecore_dbg_values.h"
47 #endif
48 #include "ecore_dbg_fw_funcs.h"
49 
/* Memory groups enum.
 * NOTE: the order must match s_mem_group_names below - each enum value is
 * used as an index into that names array.
 */
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_IOR,
	MEM_GROUP_RAM,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_PBUF,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CFC_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUPS_NUM		/* Must be last - number of memory groups */
};
82 
/* Memory groups names.
 * Indexed by enum mem_groups - keep in sync with the enum above
 * (one string per group, MEM_GROUPS_NUM entries).
 */
static const char* s_mem_group_names[] = {
	"PXP_MEM",
	"DMAE_MEM",
	"CM_MEM",
	"QM_MEM",
	"TM_MEM",
	"BRB_RAM",
	"BRB_MEM",
	"PRS_MEM",
	"SDM_MEM",
	"IOR",
	"RAM",
	"BTB_RAM",
	"CONN_CFC_MEM",
	"TASK_CFC_MEM",
	"CAU_PI",
	"CAU_MEM",
	"PXP_ILT",
	"PBUF",
	"MULD_MEM",
	"BTB_MEM",
	"RDIF_CTX",
	"TDIF_CTX",
	"CFC_MEM",
	"IGU_MEM",
	"IGU_MSIX",
	"CAU_SB",
	"BMB_RAM",
	"BMB_MEM",
};
114 
115 /* Idle check conditions */
116 
117 #ifndef __PREVENT_COND_ARR__
118 
119 static u32 cond5(const u32 *r, const u32 *imm) {
120 	return (((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]));
121 }
122 
123 static u32 cond7(const u32 *r, const u32 *imm) {
124 	return (((r[0] >> imm[0]) & imm[1]) != imm[2]);
125 }
126 
127 static u32 cond14(const u32 *r, const u32 *imm) {
128 	return ((r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]));
129 }
130 
131 static u32 cond6(const u32 *r, const u32 *imm) {
132 	return ((r[0] & imm[0]) != imm[1]);
133 }
134 
135 static u32 cond9(const u32 *r, const u32 *imm) {
136 	return ((r[0] & imm[0]) >> imm[1]) != (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
137 }
138 
139 static u32 cond10(const u32 *r, const u32 *imm) {
140 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
141 }
142 
143 static u32 cond4(const u32 *r, const u32 *imm) {
144 	return ((r[0] & ~imm[0]) != imm[1]);
145 }
146 
147 static u32 cond0(const u32 *r, const u32 *imm) {
148 	return ((r[0] & ~r[1]) != imm[0]);
149 }
150 
151 static u32 cond1(const u32 *r, const u32 *imm) {
152 	return (r[0] != imm[0]);
153 }
154 
155 static u32 cond11(const u32 *r, const u32 *imm) {
156 	return (r[0] != r[1] && r[2] == imm[0]);
157 }
158 
159 static u32 cond12(const u32 *r, const u32 *imm) {
160 	return (r[0] != r[1] && r[2] > imm[0]);
161 }
162 
163 static u32 cond3(const u32 *r, const u32 OSAL_UNUSED *imm) {
164 	return (r[0] != r[1]);
165 }
166 
167 static u32 cond13(const u32 *r, const u32 *imm) {
168 	return (r[0] & imm[0]);
169 }
170 
171 static u32 cond8(const u32 *r, const u32 *imm) {
172 	return (r[0] < (r[1] - imm[0]));
173 }
174 
175 static u32 cond2(const u32 *r, const u32 *imm) {
176 	return (r[0] > imm[0]);
177 }
178 
/* Array of Idle Check conditions.
 * Indexed by the condition ID stored in the idle check rules - the condN
 * numeric suffix must equal the function's index in this array.
 */
static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
	cond0,
	cond1,
	cond2,
	cond3,
	cond4,
	cond5,
	cond6,
	cond7,
	cond8,
	cond9,
	cond10,
	cond11,
	cond12,
	cond13,
	cond14,
};
197 
198 #endif /* __PREVENT_COND_ARR__ */
199 
200 
201 /******************************* Data Types **********************************/
202 
/* Platform IDs. Order must match the per_platform arrays in s_chip_defs
 * below (ASIC, EMUL_FULL, EMUL_REDUCED, FPGA).
 */
enum platform_ids {
	PLATFORM_ASIC,
	PLATFORM_EMUL_FULL,
	PLATFORM_EMUL_REDUCED,
	PLATFORM_FPGA,
	MAX_PLATFORM_IDS
};
210 
/* Per-platform resource counts for a chip */
struct chip_platform_defs {
	u8 num_ports;	/* Number of ports */
	u8 num_pfs;	/* Number of physical functions */
	u8 num_vfs;	/* Number of virtual functions */
};

/* Chip constant definitions */
struct chip_defs {
	const char *name;	/* Chip name string ("bb"/"ah"/"e5") */
	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
};

/* Platform constant definitions */
struct platform_defs {
	const char *name;
	/* Multiplier for delays; semantics defined where the table is used
	 * (not visible in this chunk).
	 */
	u32 delay_factor;
};
228 
/* Storm constant definitions.
 * Addresses are in bytes, sizes are in quad-regs.
 */
struct storm_defs {
	char letter;		/* Single-letter Storm name (T/M/U/X/Y/P) */
	enum block_id block_id;	/* Associated SEM block */

	/* Debug bus client, one per chip (indexed by chip ID) */
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	bool has_vfc;

	/* SEM debug register addresses */
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_slow_dbg_empty_addr;

	/* CM context access: write address, then (lid_size, rd_addr) pairs
	 * for connection AG/ST and task AG/ST contexts.
	 */
	u32 cm_ctx_wr_addr;
	u32 cm_conn_ag_ctx_lid_size;
	u32 cm_conn_ag_ctx_rd_addr;
	u32 cm_conn_st_ctx_lid_size;
	u32 cm_conn_st_ctx_rd_addr;
	u32 cm_task_ag_ctx_lid_size;
	u32 cm_task_ag_ctx_rd_addr;
	u32 cm_task_st_ctx_lid_size;
	u32 cm_task_st_ctx_rd_addr;
};
254 
/* Block constant definitions */
struct block_defs {
	const char *name;		/* Block name string */
	bool exists[MAX_CHIP_IDS];	/* Per-chip: block present on chip */
	bool associated_to_storm;

	/* Valid only if associated_to_storm is true */
	u32 storm_id;

	/* Debug bus client per chip; MAX_DBG_BUS_CLIENTS means no client */
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];

	/* Debug bus control register addresses (0 if none) */
	u32 dbg_select_addr;
	u32 dbg_enable_addr;
	u32 dbg_shift_addr;
	u32 dbg_force_valid_addr;
	u32 dbg_force_frame_addr;
	bool has_reset_bit;

	/* If true, block is taken out of reset before dump */
	bool unreset;
	enum dbg_reset_regs reset_reg;

	/* Bit offset in reset register */
	u8 reset_bit_offset;
};
278 
/* Reset register definitions */
struct reset_reg_defs {
	u32 addr;			/* Register GRC address */
	bool exists[MAX_CHIP_IDS];	/* Per-chip: register present */
	u32 unreset_val[MAX_CHIP_IDS];	/* Per-chip value to take blocks out of reset */
};

/* Debug Bus Constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
	u8 hw_op_val;	/* Operation value as programmed into HW */
	bool is_cyclic;
};

/* Storm Mode definitions */
struct storm_mode_defs {
	const char *name;
	bool is_fast_dbg;
	u8 id_in_hw;	/* Mode ID as programmed into HW */
};

/* GRC dump parameter definitions */
struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];	/* Per-chip default value */
	u32 min;			/* Minimum legal value */
	u32 max;			/* Maximum legal value */
	bool is_preset;			/* True for preset (meta) parameters */
	u32 exclude_all_preset_val;	/* Value under the "exclude all" preset */
	u32 crash_preset_val;		/* Value under the "crash" preset */
};
307 
/* RSS memory definitions.
 * address is in 128b units. Width is in bits.
 */
struct rss_mem_defs {
	const char *mem_name;
	const char *type_name;
	u32 addr;
	u32 entry_width;
	u32 num_entries[MAX_CHIP_IDS];	/* Per-chip number of entries */
};

/* VFC RAM definitions */
struct vfc_ram_defs {
	const char *mem_name;
	const char *type_name;
	u32 base_row;	/* First RAM row of this region */
	u32 num_rows;
};

/* Big RAM (BRB/BTB/BMB) definitions */
struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;	/* GRC param controlling this RAM's dump */
	u32 addr_reg_addr;		/* Indirect-access address register */
	u32 data_reg_addr;		/* Indirect-access data register */
	u32 num_of_blocks[MAX_CHIP_IDS]; /* Per-chip number of blocks */
};

/* PHY definitions for TBUS indirect access */
struct phy_defs {
	const char *phy_name;

	/* PHY base GRC address */
	u32 base_addr;

	/* Relative address of indirect TBUS address register (bits 0..7) */
	u32 tbus_addr_lo_addr;

	/* Relative address of indirect TBUS address register (bits 8..10) */
	u32 tbus_addr_hi_addr;

	/* Relative address of indirect TBUS data register (bits 0..7) */
	u32 tbus_data_lo_addr;

	/* Relative address of indirect TBUS data register (bits 8..11) */
	u32 tbus_data_hi_addr;
};
352 
353 /******************************** Constants **********************************/
354 
/* Max number of connection/task load IDs */
#define MAX_LCIDS			320
#define MAX_LTIDS			320

/* IOR (internal output register) layout */
#define NUM_IOR_SETS			2
#define IORS_PER_SET			176
#define IOR_SET_OFFSET(set_id)		((set_id) * 256)

#define BYTES_IN_DWORD			sizeof(u32)
/* Cyclic right shift (rotate) of val within a field of val_width bits.
 * The expansion is fully parenthesized so the macro is safe inside larger
 * expressions (the original expansion ended in an unparenthesized "& mask",
 * which binds looser than, e.g., multiplication).
 * Assumes val_width < 32 and amount <= val_width.
 */
#define SHR(val, val_width, amount) \
	((((val) | ((val) << (val_width))) >> (amount)) & \
	 ((1 << (val_width)) - 1))
366 
/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)

/* Bit offset / size of a field, from generated <type>_<field>_OFFSET/SIZE
 * constants.
 */
#define FIELD_BIT_OFFSET(type, field)	type##_##field##_##OFFSET
#define FIELD_BIT_SIZE(type, field)	type##_##field##_##SIZE

/* Dword index and intra-dword bit shift of a field */
#define FIELD_DWORD_OFFSET(type, field)		(int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)

/* Field mask, positioned within its dword */
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))

/* Write val into the given field of dword array var.
 * Wrapped in do/while(0) so the two statements behave as one statement in
 * if/else contexts (the original expansion was two bare statements, so an
 * un-braced "if (cond) SET_VAR_FIELD(...)" executed the second half
 * unconditionally).
 */
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &= \
			(~FIELD_BIT_MASK(type, field)); \
		var[FIELD_DWORD_OFFSET(type, field)] |= \
			(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)
376 
/* Write/read an array of dwords to/from a single register address.
 * NOTE: both macros use a loop variable named 'i' that must already be
 * declared in the caller's scope.
 */
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) 	for (i = 0; i < (arr_size); i++) 		ecore_wr(dev, ptt, addr, (arr)[i])

#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) 	for (i = 0; i < (arr_size); i++) 		(arr)[i] = ecore_rd(dev, ptt, addr)

/* Compile-time check that an array has exactly 'size' elements */
#define CHECK_ARR_SIZE(arr, size) 	OSAL_BUILD_BUG_ON(!(OSAL_ARRAY_SIZE(arr) == size))
382 
#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
#endif
#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
#endif

/* extra lines include a signature line + optional latency events line */
#ifndef NUM_DBG_LINES
#define NUM_EXTRA_DBG_LINES(block_desc)		(1 + (block_desc->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc)		(block_desc->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
#endif

/* Each RAM line holds 2 dwords */
#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
#define RAM_LINES_TO_BYTES(lines)		DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT		24
#define MEM_DUMP_ENTRY_SIZE_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))

#define IDLE_CHK_MAX_ENTRIES_SIZE	32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE		64
#define VFC_CAM_CMD_ROW_OFFSET		48
#define VFC_CAM_CMD_ROW_SIZE		9
#define VFC_CAM_ADDR_STRUCT_SIZE	16
#define VFC_CAM_ADDR_OP_OFFSET		0
#define VFC_CAM_ADDR_OP_SIZE		4
#define VFC_CAM_RESP_STRUCT_SIZE	256
#define VFC_RAM_ADDR_STRUCT_SIZE	16
#define VFC_RAM_ADDR_OP_OFFSET		0
#define VFC_RAM_ADDR_OP_SIZE		2
#define VFC_RAM_ADDR_ROW_OFFSET		2
#define VFC_RAM_ADDR_ROW_SIZE		10
#define VFC_RAM_RESP_STRUCT_SIZE	256

/* Dword counts derived from the bit sizes above */
#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES		4

#define VFC_CAM_NUM_ROWS		512

#define VFC_OPCODE_CAM_RD		14
#define VFC_OPCODE_RAM_RD		0

#define NUM_RSS_MEM_TYPES		5

#define NUM_BIG_RAM_TYPES		3
#define BIG_RAM_BLOCK_SIZE_BYTES	128
#define BIG_RAM_BLOCK_SIZE_DWORDS		BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)

#define NUM_PHY_TBUS_ADDRESSES		2048
#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)

#define SEM_FAST_MODE6_SRC_ENABLE	0x10
#define SEM_FAST_MODE6_SRC_DISABLE	0x3f

#define SEM_SLOW_MODE1_DATA_ENABLE	0x1

#define VALUES_PER_CYCLE		4
#define MAX_CYCLE_VALUES_MASK		((1 << VALUES_PER_CYCLE) - 1)

#define MAX_DWORDS_PER_CYCLE		8

#define HW_ID_BITS			3

#define NUM_CALENDAR_SLOTS		16

/* Debug bus trigger constants */
#define MAX_TRIGGER_STATES		3
#define TRIGGER_SETS_PER_STATE		2
#define MAX_CONSTRAINTS			4

/* SEM filter enable masks */
#define SEM_FILTER_CID_EN_MASK		0x008
#define SEM_FILTER_EID_MASK_EN_MASK	0x010
#define SEM_FILTER_EID_RANGE_EN_MASK	0x110

#define CHUNK_SIZE_IN_DWORDS		64
#define CHUNK_SIZE_IN_BYTES		DWORDS_TO_BYTES(CHUNK_SIZE_IN_DWORDS)

/* Internal buffer geometry */
#define INT_BUF_NUM_OF_LINES		192
#define INT_BUF_LINE_SIZE_IN_DWORDS	16
#define INT_BUF_SIZE_IN_DWORDS			(INT_BUF_NUM_OF_LINES * INT_BUF_LINE_SIZE_IN_DWORDS)
#define INT_BUF_SIZE_IN_CHUNKS			(INT_BUF_SIZE_IN_DWORDS / CHUNK_SIZE_IN_DWORDS)

#define PCI_BUF_LINE_SIZE_IN_DWORDS	8
#define PCI_BUF_LINE_SIZE_IN_BYTES		DWORDS_TO_BYTES(PCI_BUF_LINE_SIZE_IN_DWORDS)

#define TARGET_EN_MASK_PCI		0x3
#define TARGET_EN_MASK_NIG		0x4

#define PCI_REQ_CREDIT			1
#define PCI_PHYS_ADDR_TYPE		0
486 
/* Build an opaque FID for the given PCI function number.
 * The argument is parenthesized so that expressions (e.g. pf + 1) expand
 * correctly - the original macro shifted the raw token sequence.
 */
#define OPAQUE_FID(pci_func)		((((pci_func) << 4)) | 0xff00)
488 
/* Offset (in regs) from a reset register to its unreset counterpart */
#define RESET_REG_UNRESET_OFFSET	4

#define PCI_PKT_SIZE_IN_CHUNKS		1
#define PCI_PKT_SIZE_IN_BYTES			(PCI_PKT_SIZE_IN_CHUNKS * CHUNK_SIZE_IN_BYTES)

#define NIG_PKT_SIZE_IN_CHUNKS		4

/* Delays in milliseconds */
#define FLUSH_DELAY_MS			500
#define STALL_DELAY_MS			500

/* Dummy MAC address / Ethertype used for debug packets */
#define SRC_MAC_ADDR_LO16		0x0a0b
#define SRC_MAC_ADDR_HI32		0x0c0d0e0f
#define ETH_TYPE			0x1000

#define STATIC_DEBUG_LINE_DWORDS	9

#define NUM_COMMON_GLOBAL_PARAMS	8

/* Firmware image IDs */
#define FW_IMG_KUKU			0
#define FW_IMG_MAIN			1
#define FW_IMG_L2B			2

#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS		2
#endif
#define REG_FIFO_DEPTH_ELEMENTS		32
#define REG_FIFO_DEPTH_DWORDS			(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS		4
#endif
#define IGU_FIFO_DEPTH_ELEMENTS		64
#define IGU_FIFO_DEPTH_DWORDS			(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#define SEMI_SYNC_FIFO_POLLING_DELAY_MS	5
#define SEMI_SYNC_FIFO_POLLING_COUNT	20

#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
#endif
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS   	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS 	* PROTECTION_OVERRIDE_ELEMENT_DWORDS)

/* Scratchpad address of the MCP trace offset/size word */
#define MCP_SPAD_TRACE_OFFSIZE_ADDR		(MCP_REG_SCRATCH + 	OFFSETOF(struct static_init, sections[SPAD_SECTION_TRACE]))

/* Placeholders used when the FW version/image name is unknown */
#define EMPTY_FW_VERSION_STR		"???_???_???_???"
#define EMPTY_FW_IMAGE_STR		"???????????????"
536 
537 
538 /***************************** Constant Arrays *******************************/
539 
/* A debug data array: pointer + length in dwords */
struct dbg_array {
	const u32 *ptr;		/* Array data (OSAL_NULL when not loaded) */
	u32 size_in_dwords;	/* Number of dwords pointed to by ptr */
};
544 
/* Debug arrays.
 * Indexed by enum bin_dbg_buffer_type. When USE_DBG_BIN_FILE is defined the
 * table starts empty and is filled from the binary debug file at runtime;
 * otherwise it is statically initialized from ecore_dbg_values.h.
 */
#ifdef USE_DBG_BIN_FILE
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { { OSAL_NULL } };
#else
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = {

	/* BIN_BUF_DBG_MODE_TREE */
	{ (const u32*)dbg_modes_tree_buf, OSAL_ARRAY_SIZE(dbg_modes_tree_buf)},

	/* BIN_BUF_DBG_DUMP_REG */
	{ dump_reg, OSAL_ARRAY_SIZE(dump_reg) },

	/* BIN_BUF_DBG_DUMP_MEM */
	{ dump_mem, OSAL_ARRAY_SIZE(dump_mem) },

	/* BIN_BUF_DBG_IDLE_CHK_REGS */
	{ idle_chk_regs, OSAL_ARRAY_SIZE(idle_chk_regs) },

	/* BIN_BUF_DBG_IDLE_CHK_IMMS */
	{ idle_chk_imms, OSAL_ARRAY_SIZE(idle_chk_imms) },

	/* BIN_BUF_DBG_IDLE_CHK_RULES */
	{ idle_chk_rules, OSAL_ARRAY_SIZE(idle_chk_rules) },

	/* BIN_BUF_DBG_IDLE_CHK_PARSING_DATA */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_ATTN_BLOCKS */
	{ attn_block, OSAL_ARRAY_SIZE(attn_block) },

	/* BIN_BUF_DBG_ATTN_REGS */
	{ attn_reg, OSAL_ARRAY_SIZE(attn_reg) },

	/* BIN_BUF_DBG_ATTN_INDEXES */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_ATTN_NAME_OFFSETS */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_BUS_BLOCKS */
	{ dbg_bus_blocks, OSAL_ARRAY_SIZE(dbg_bus_blocks) },

	/* BIN_BUF_DBG_BUS_LINES */
	{ dbg_bus_lines, OSAL_ARRAY_SIZE(dbg_bus_lines) },

	/* BIN_BUF_DBG_BUS_BLOCKS_USER_DATA */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_PARSING_STRINGS */
	{ OSAL_NULL, 0 }
};
#endif
600 
/* Chip constant definitions array.
 * Indexed by chip ID; inner arrays are indexed by enum platform_ids
 * (ASIC, EMUL_FULL, EMUL_REDUCED, FPGA).
 */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{ "bb",

		/* ASIC */
		{ { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* EMUL_FULL */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* EMUL_REDUCED */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* FPGA */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB } } },

	{ "ah",

		/* ASIC */
		{ { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* EMUL_FULL */
		{ MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* EMUL_REDUCED */
		{ MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* FPGA - only 8 PFs */
		{ MAX_NUM_PORTS_K2, 8, MAX_NUM_VFS_K2 } } },

	{ "e5",

		/* ASIC */
		{ { MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },

		/* EMUL_FULL */
		{ MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },

		/* EMUL_REDUCED */
		{ MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },

		/* FPGA - only 8 PFs */
		{ MAX_NUM_PORTS_E5, 8, MAX_NUM_VFS_E5 } } }
};
645 
/* Storm constant definitions array.
 * Initializer field order follows struct storm_defs: letter, block_id,
 * dbg_client_id[bb/k2/e5], has_vfc, SEM register addresses, cm_ctx_wr_addr,
 * then (lid_size, rd_addr) pairs for conn AG / conn ST / task AG / task ST.
 */
static struct storm_defs s_storm_defs[] = {

	/* Tstorm */
	{	'T', BLOCK_TSEM,
		{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT }, true,
		TSEM_REG_FAST_MEMORY,
		TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		TCM_REG_CTX_RBC_ACCS,
		4, TCM_REG_AGG_CON_CTX,
		16, TCM_REG_SM_CON_CTX,
		2, TCM_REG_AGG_TASK_CTX,
		4, TCM_REG_SM_TASK_CTX },

	/* Mstorm */
	{	'M', BLOCK_MSEM,
		{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM }, false,
		MSEM_REG_FAST_MEMORY,
		MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
		MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		MCM_REG_CTX_RBC_ACCS,
		1, MCM_REG_AGG_CON_CTX,
		10, MCM_REG_SM_CON_CTX,
		2, MCM_REG_AGG_TASK_CTX,
		7, MCM_REG_SM_TASK_CTX },

	/* Ustorm */
	{	'U', BLOCK_USEM,
		{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU }, false,
		USEM_REG_FAST_MEMORY,
		USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
		USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
		UCM_REG_CTX_RBC_ACCS,
		2, UCM_REG_AGG_CON_CTX,
		13, UCM_REG_SM_CON_CTX,
		3, UCM_REG_AGG_TASK_CTX,
		3, UCM_REG_SM_TASK_CTX },

	/* Xstorm */
	{	'X', BLOCK_XSEM,
		{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX }, false,
		XSEM_REG_FAST_MEMORY,
		XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
		XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		XCM_REG_CTX_RBC_ACCS,
		9, XCM_REG_AGG_CON_CTX,
		15, XCM_REG_SM_CON_CTX,
		0, 0,
		0, 0 },

	/* Ystorm */
	{	'Y', BLOCK_YSEM,
		{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY }, false,
		YSEM_REG_FAST_MEMORY,
		YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
		/* NOTE(review): the slow-dbg-empty register below is TSEM_, not
		 * YSEM_. This matches the upstream qed driver, but looks like a
		 * copy/paste - confirm against the register spec.
		 */
		YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		YCM_REG_CTX_RBC_ACCS,
		2, YCM_REG_AGG_CON_CTX,
		3, YCM_REG_SM_CON_CTX,
		2, YCM_REG_AGG_TASK_CTX,
		12, YCM_REG_SM_TASK_CTX },

	/* Pstorm */
	{	'P', BLOCK_PSEM,
		{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS }, true,
		PSEM_REG_FAST_MEMORY,
		PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
		PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		PCM_REG_CTX_RBC_ACCS,
		0, 0,
		10, PCM_REG_SM_CON_CTX,
		0, 0,
		0, 0 }
};
727 
/* Block definitions array.
 * Initializer field order follows struct block_defs:
 * name, exists[bb/k2/e5], associated_to_storm, storm_id,
 * dbg_client_id[bb/k2/e5], dbg_select_addr, dbg_enable_addr,
 * dbg_shift_addr, dbg_force_valid_addr, dbg_force_frame_addr,
 * has_reset_bit, unreset, reset_reg, reset_bit_offset.
 * Blocks with no debug bus client use MAX_DBG_BUS_CLIENTS and zero
 * register addresses.
 */

static struct block_defs block_grc_defs = {
	"grc", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
	GRC_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_UA, 1 };

static struct block_defs block_miscs_defs = {
	"miscs", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_misc_defs = {
	"misc", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_dbu_defs = {
	"dbu", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_pglue_b_defs = {
	"pglue_b", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
	PGLUE_B_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 1 };

static struct block_defs block_cnig_defs = {
	"cnig", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 0 };

static struct block_defs block_cpmu_defs = {
	"cpmu", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 8 };

static struct block_defs block_ncsi_defs = {
	"ncsi", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
	NCSI_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 5 };

static struct block_defs block_opte_defs = {
	"opte", { true, true, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 4 };

static struct block_defs block_bmb_defs = {
	"bmb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
	BMB_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 7 };

static struct block_defs block_pcie_defs = {
	"pcie", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
	PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_mcp_defs = {
	"mcp", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_mcp2_defs = {
	"mcp2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
	MCP2_REG_DBG_FORCE_FRAME,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_pswhst_defs = {
	"pswhst", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
	PSWHST_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 0 };

static struct block_defs block_pswhst2_defs = {
	"pswhst2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
	PSWHST2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 0 };

static struct block_defs block_pswrd_defs = {
	"pswrd", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
	PSWRD_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 2 };

static struct block_defs block_pswrd2_defs = {
	"pswrd2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
	PSWRD2_REG_DBG_SHIFT,	PSWRD2_REG_DBG_FORCE_VALID,
	PSWRD2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 2 };

static struct block_defs block_pswwr_defs = {
	"pswwr", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
	PSWWR_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 3 };

static struct block_defs block_pswwr2_defs = {
	"pswwr2", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISC_PL_HV, 3 };

static struct block_defs block_pswrq_defs = {
	"pswrq", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
	PSWRQ_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1 };

static struct block_defs block_pswrq2_defs = {
	"pswrq2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
	PSWRQ2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1 };

static struct block_defs block_pglcs_defs =	{
	"pglcs", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 2 };

static struct block_defs block_ptu_defs ={
	"ptu", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
	PTU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20 };

static struct block_defs block_dmae_defs = {
	"dmae", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
	DMAE_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28 };

static struct block_defs block_tcm_defs = {
	"tcm", { true, true, true }, true, DBG_TSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
	TCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5 };

static struct block_defs block_mcm_defs = {
	"mcm", { true, true, true }, true, DBG_MSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
	MCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3 };

static struct block_defs block_ucm_defs = {
	"ucm", { true, true, true }, true, DBG_USTORM_ID,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
	UCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8 };

static struct block_defs block_xcm_defs = {
	"xcm", { true, true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
	XCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19 };

static struct block_defs block_ycm_defs = {
	"ycm", { true, true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
	YCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5 };

static struct block_defs block_pcm_defs = {
	"pcm", { true, true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
	PCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4 };

static struct block_defs block_qm_defs = {
	"qm", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ },
	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
	QM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16 };

static struct block_defs block_tm_defs = {
	"tm", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
	TM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17 };

static struct block_defs block_dorq_defs = {
	"dorq", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
	DORQ_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18 };

static struct block_defs block_brb_defs = {
	"brb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
	BRB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0 };
987 
988 static struct block_defs block_src_defs = {
989 	"src", { true, true, true }, false, 0,
990 	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
991 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
992 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
993 	SRC_REG_DBG_FORCE_FRAME,
994 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2 };
995 
996 static struct block_defs block_prs_defs = {
997 	"prs", { true, true, true }, false, 0,
998 	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
999 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
1000 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
1001 	PRS_REG_DBG_FORCE_FRAME,
1002 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1 };
1003 
1004 static struct block_defs block_tsdm_defs = {
1005 	"tsdm", { true, true, true }, true, DBG_TSTORM_ID,
1006 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
1007 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
1008 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
1009 	TSDM_REG_DBG_FORCE_FRAME,
1010 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3 };
1011 
1012 static struct block_defs block_msdm_defs = {
1013 	"msdm", { true, true, true }, true, DBG_MSTORM_ID,
1014 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1015 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
1016 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
1017 	MSDM_REG_DBG_FORCE_FRAME,
1018 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6 };
1019 
1020 static struct block_defs block_usdm_defs = {
1021 	"usdm", { true, true, true }, true, DBG_USTORM_ID,
1022 	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
1023 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
1024 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
1025 	USDM_REG_DBG_FORCE_FRAME,
1026 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
1027 	};
/* Per-block debug definitions, continued (same positional layout as the
 * block_defs entries earlier in this file).
 */
static struct block_defs block_xsdm_defs = {
	"xsdm", { true, true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
	XSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20 };

static struct block_defs block_ysdm_defs = {
	"ysdm", { true, true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
	YSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8 };

static struct block_defs block_psdm_defs = {
	"psdm", { true, true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
	PSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7 };

static struct block_defs block_tsem_defs = {
	"tsem", { true, true, true }, true, DBG_TSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
	TSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4 };

static struct block_defs block_msem_defs = {
	"msem", { true, true, true }, true, DBG_MSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
	MSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9 };

static struct block_defs block_usem_defs = {
	"usem", { true, true, true }, true, DBG_USTORM_ID,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
	USEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9 };

static struct block_defs block_xsem_defs = {
	"xsem", { true, true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
	XSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21 };

static struct block_defs block_ysem_defs = {
	"ysem", { true, true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
	YSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11 };

static struct block_defs block_psem_defs = {
	"psem", { true, true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
	PSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10 };

static struct block_defs block_rss_defs = {
	"rss", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
	RSS_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18 };

static struct block_defs block_tmld_defs = {
	"tmld", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
	TMLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13 };

static struct block_defs block_muld_defs = {
	"muld", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
	MULD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14 };

/* yuld exists on BB/K2 only — note the false third (E5) availability flag
 * and the _BB_K2 register macros.
 */
static struct block_defs block_yuld_defs = {
	"yuld", { true, true, false }, false, 0,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, MAX_DBG_BUS_CLIENTS },
	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
	YULD_REG_DBG_FORCE_FRAME_BB_K2,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15 };

static struct block_defs block_xyld_defs = {
	"xyld", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
	XYLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12 };

static struct block_defs block_prm_defs = {
	"prm", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
	PRM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21 };

static struct block_defs block_pbf_pb1_defs = {
	"pbf_pb1", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
	PBF_PB1_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 11 };

static struct block_defs block_pbf_pb2_defs = {
	"pbf_pb2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
	PBF_PB2_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 12 };

static struct block_defs block_rpb_defs = {
	"rpb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
	RPB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13 };

static struct block_defs block_btb_defs = {
	"btb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
	BTB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10 };

static struct block_defs block_pbf_defs = {
	"pbf", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
	PBF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15 };

static struct block_defs block_rdif_defs = {
	"rdif", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
	RDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16 };

static struct block_defs block_tdif_defs = {
	"tdif", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
	TDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17 };

static struct block_defs block_cdu_defs = {
	"cdu", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
	CDU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23 };

static struct block_defs block_ccfc_defs = {
	"ccfc", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
	CCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24 };

static struct block_defs block_tcfc_defs = {
	"tcfc", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
	TCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25 };

static struct block_defs block_igu_defs = {
	"igu", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
	IGU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27 };

static struct block_defs block_cau_defs = {
	"cau", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
	CAU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19 };

/* umac exists on K2/E5 only — first (BB) entries use MAX_DBG_BUS_CLIENTS
 * and the register macros carry the _K2_E5 suffix.
 */
static struct block_defs block_umac_defs = {
	"umac", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 6 };
1251 
1252 static struct block_defs block_xmac_defs = {
1253 	"xmac", { true, false, false }, false, 0,
1254 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1255 	0, 0, 0, 0, 0,
1256 	false, false, MAX_DBG_RESET_REGS, 0	};
1257 
/* Per-block debug definitions, continued (same positional layout as the
 * block_defs entries earlier in this file). Entries whose register fields
 * are all zero have no debug bus access registers.
 */
static struct block_defs block_dbg_defs = {
	"dbg", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 };

static struct block_defs block_nig_defs = {
	"nig", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
	NIG_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0 };

static struct block_defs block_wol_defs = {
	"wol", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
	WOL_REG_DBG_FORCE_FRAME_K2_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 };

static struct block_defs block_bmbn_defs = {
	"bmbn", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_ipc_defs = {
	"ipc", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 8 };

static struct block_defs block_nwm_defs = {
	"nwm", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
	NWM_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 };

static struct block_defs block_nws_defs = {
	"nws", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
	NWS_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 12 };

static struct block_defs block_ms_defs = {
	"ms", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
	MS_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 13 };

static struct block_defs block_phy_pcie_defs = {
	"phy_pcie", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
	PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_led_defs = {
	"led", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 14 };

static struct block_defs block_avs_wrap_defs = {
	"avs_wrap", { false, true, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 11 };

/* TODO: add debug bus parameters when E5 RGFS RF is added */
static struct block_defs block_rgfs_defs = {
	"rgfs", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29 };

static struct block_defs block_rgsrc_defs = {
	"rgsrc", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
	RGSRC_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 30 };

/* TODO: add debug bus parameters when E5 TGFS RF is added */
static struct block_defs block_tgfs_defs = {
	"tgfs", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30 };

static struct block_defs block_tgsrc_defs = {
	"tgsrc", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV },
	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
	TGSRC_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 31 };

static struct block_defs block_ptld_defs = {
	"ptld", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT },
	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
	PTLD_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 28 };

static struct block_defs block_ypld_defs = {
	"ypld", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS },
	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
	YPLD_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 27 };

static struct block_defs block_misc_aeu_defs = {
	"misc_aeu", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_bar0_map_defs = {
	"bar0_map", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };
1395 
1396 
1397 static struct block_defs* s_block_defs[MAX_BLOCK_ID] = {
1398 	&block_grc_defs,
1399  	&block_miscs_defs,
1400  	&block_misc_defs,
1401  	&block_dbu_defs,
1402  	&block_pglue_b_defs,
1403  	&block_cnig_defs,
1404  	&block_cpmu_defs,
1405  	&block_ncsi_defs,
1406  	&block_opte_defs,
1407  	&block_bmb_defs,
1408  	&block_pcie_defs,
1409  	&block_mcp_defs,
1410  	&block_mcp2_defs,
1411  	&block_pswhst_defs,
1412  	&block_pswhst2_defs,
1413  	&block_pswrd_defs,
1414  	&block_pswrd2_defs,
1415  	&block_pswwr_defs,
1416  	&block_pswwr2_defs,
1417  	&block_pswrq_defs,
1418  	&block_pswrq2_defs,
1419  	&block_pglcs_defs,
1420  	&block_dmae_defs,
1421  	&block_ptu_defs,
1422  	&block_tcm_defs,
1423  	&block_mcm_defs,
1424  	&block_ucm_defs,
1425  	&block_xcm_defs,
1426  	&block_ycm_defs,
1427  	&block_pcm_defs,
1428  	&block_qm_defs,
1429  	&block_tm_defs,
1430  	&block_dorq_defs,
1431  	&block_brb_defs,
1432  	&block_src_defs,
1433  	&block_prs_defs,
1434  	&block_tsdm_defs,
1435  	&block_msdm_defs,
1436  	&block_usdm_defs,
1437  	&block_xsdm_defs,
1438  	&block_ysdm_defs,
1439  	&block_psdm_defs,
1440  	&block_tsem_defs,
1441  	&block_msem_defs,
1442  	&block_usem_defs,
1443  	&block_xsem_defs,
1444  	&block_ysem_defs,
1445  	&block_psem_defs,
1446  	&block_rss_defs,
1447  	&block_tmld_defs,
1448  	&block_muld_defs,
1449  	&block_yuld_defs,
1450  	&block_xyld_defs,
1451  	&block_ptld_defs,
1452  	&block_ypld_defs,
1453  	&block_prm_defs,
1454  	&block_pbf_pb1_defs,
1455  	&block_pbf_pb2_defs,
1456  	&block_rpb_defs,
1457  	&block_btb_defs,
1458  	&block_pbf_defs,
1459  	&block_rdif_defs,
1460  	&block_tdif_defs,
1461  	&block_cdu_defs,
1462  	&block_ccfc_defs,
1463  	&block_tcfc_defs,
1464  	&block_igu_defs,
1465  	&block_cau_defs,
1466  	&block_rgfs_defs,
1467  	&block_rgsrc_defs,
1468  	&block_tgfs_defs,
1469  	&block_tgsrc_defs,
1470  	&block_umac_defs,
1471  	&block_xmac_defs,
1472  	&block_dbg_defs,
1473  	&block_nig_defs,
1474  	&block_wol_defs,
1475  	&block_bmbn_defs,
1476  	&block_ipc_defs,
1477  	&block_nwm_defs,
1478  	&block_nws_defs,
1479  	&block_ms_defs,
1480  	&block_phy_pcie_defs,
1481  	&block_led_defs,
1482  	&block_avs_wrap_defs,
1483  	&block_misc_aeu_defs,
1484  	&block_bar0_map_defs,
1485 
1486 };
1487 
1488 
/* Constraint operation types, indexed by DBG_BUS_CONSTRAINT_OP_* (named in
 * the per-entry comments). Each entry is presumed { hw op value,
 * cyclic-compare flag } — the flag is true exactly for the *C-suffixed
 * (cyclic) variants; TODO confirm against struct
 * dbg_bus_constraint_op_defs.
 */
static struct dbg_bus_constraint_op_defs s_constraint_op_defs[] = {

	/* DBG_BUS_CONSTRAINT_OP_EQ */
	{ 0, false },

	/* DBG_BUS_CONSTRAINT_OP_NE */
	{ 5, false },

	/* DBG_BUS_CONSTRAINT_OP_LT */
	{ 1, false },

	/* DBG_BUS_CONSTRAINT_OP_LTC */
	{ 1, true },

	/* DBG_BUS_CONSTRAINT_OP_LE */
	{ 2, false },

	/* DBG_BUS_CONSTRAINT_OP_LEC */
	{ 2, true },

	/* DBG_BUS_CONSTRAINT_OP_GT */
	{ 4, false },

	/* DBG_BUS_CONSTRAINT_OP_GTC */
	{ 4, true },

	/* DBG_BUS_CONSTRAINT_OP_GE */
	{ 3, false },

	/* DBG_BUS_CONSTRAINT_OP_GEC */
	{ 3, true }
};
1522 
/* Human-readable names of the debug bus targets, indexed by the
 * DBG_BUS_TARGET_ID_* values named in the per-entry comments.
 */
static const char* s_dbg_target_names[] = {

	/* DBG_BUS_TARGET_ID_INT_BUF */
	"int-buf",

	/* DBG_BUS_TARGET_ID_NIG */
	"nw",

	/* DBG_BUS_TARGET_ID_PCI */
	"pci-buf"
};
1534 
/* Storm debug-bus mode definitions, indexed by the DBG_BUS_STORM_MODE_*
 * values named in the per-entry comments. Positional fields presumed
 * { mode name, flag, hw mode id } — field names not visible here; TODO
 * confirm against struct storm_mode_defs.
 */
static struct storm_mode_defs s_storm_mode_defs[] = {

	/* DBG_BUS_STORM_MODE_PRINTF */
	{ "printf", true, 0 },

	/* DBG_BUS_STORM_MODE_PRAM_ADDR */
	{ "pram_addr", true, 1 },

	/* DBG_BUS_STORM_MODE_DRA_RW */
	{ "dra_rw", true, 2 },

	/* DBG_BUS_STORM_MODE_DRA_W */
	{ "dra_w", true, 3 },

	/* DBG_BUS_STORM_MODE_LD_ST_ADDR */
	{ "ld_st_addr", true, 4 },

	/* DBG_BUS_STORM_MODE_DRA_FSM */
	{ "dra_fsm", true, 5 },

	/* DBG_BUS_STORM_MODE_RH */
	{ "rh", true, 6 },

	/* DBG_BUS_STORM_MODE_FOC */
	{ "foc", false, 1 },

	/* DBG_BUS_STORM_MODE_EXT_STORE */
	{ "ext_store", false, 3 }
};
1564 
/* Platform definitions, indexed by the PLATFORM_* values named in the
 * per-entry comments. The numeric field is presumably a timing/delay
 * multiplier (1 on ASIC, 2000 on emulation, 200 on FPGA) — TODO confirm
 * against struct platform_defs.
 */
static struct platform_defs s_platform_defs[] = {

	/* PLATFORM_ASIC */
	{ "asic", 1 },

	/* PLATFORM_EMUL_FULL */
	{ "emul_full", 2000 },

	/* PLATFORM_EMUL_REDUCED */
	{ "emul_reduced", 2000 },

	/* PLATFORM_FPGA */
	{ "fpga", 200 }
};
1579 
/* GRC dump parameter definitions, indexed by the DBG_GRC_PARAM_* values
 * named in the per-entry comments. Positional fields presumed
 * { per-chip default value, min, max, is-preset flag, and two preset
 * values } — field names not visible here; TODO confirm against
 * struct grc_param_defs.
 */
static struct grc_param_defs s_grc_param_defs[] = {

	/* DBG_GRC_PARAM_DUMP_TSTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_MSTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_USTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_XSTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_YSTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_PSTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_REGS */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_RAM */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_PBUF */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_IOR */
	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_VFC */
	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_CM_CTX */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_ILT */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_RSS */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_CAU */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_QM */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_MCP */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_RESERVED */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_CFC */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_IGU */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_BRB */
	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_BTB */
	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_BMB */
	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_NIG */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_MULD */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_PRS */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_DMAE */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_TM */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_SDM */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_DIF */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_STATIC */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_UNSTALL */
	{ { 0, 0, 0 }, 0, 1, false, 0, 0 },

	/* DBG_GRC_PARAM_NUM_LCIDS */
	{ { MAX_LCIDS, MAX_LCIDS, MAX_LCIDS }, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS },

	/* DBG_GRC_PARAM_NUM_LTIDS */
	{ { MAX_LTIDS, MAX_LTIDS, MAX_LTIDS }, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS },

	/* DBG_GRC_PARAM_EXCLUDE_ALL */
	{ { 0, 0, 0 }, 0, 1, true, 0, 0 },

	/* DBG_GRC_PARAM_CRASH */
	{ { 0, 0, 0 }, 0, 1, true, 0, 0 },

	/* DBG_GRC_PARAM_PARITY_SAFE */
	{ { 0, 0, 0 }, 0, 1, false, 1, 0 },

	/* DBG_GRC_PARAM_DUMP_CM */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_PHY */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_NO_MCP */
	{ { 0, 0, 0 }, 0, 1, false, 0, 0 },

	/* DBG_GRC_PARAM_NO_FW_VER */
	{ { 0, 0, 0 }, 0, 1, false, 0, 0 }
};
1705 
/* RSS memory definitions. Per entry: memory name, type name, two numeric
 * fields (presumably base address and entry width), and a per-chip
 * element count — field names not visible here; TODO confirm against
 * struct rss_mem_defs.
 */
static struct rss_mem_defs s_rss_mem_defs[] = {
	{ "rss_mem_cid", "rss_cid", 0, 32,
	{ 256, 320, 512 } },

	{ "rss_mem_key_msb", "rss_key", 1024, 256,
	{ 128, 208, 257 } },

	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
	{ 128, 208, 257 } },

	{ "rss_mem_info", "rss_info", 3072, 16,
	{ 128, 208, 256 } },

	{ "rss_mem_ind", "rss_ind", 4096, 16,
	{ 16384, 26624, 32768 } }
};
1722 
/* VFC RAM definitions: { ram name, type name, base row, row count } —
 * field names presumed from the values (each base equals the previous
 * base plus its count); TODO confirm against struct vfc_ram_defs.
 */
static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{ "vfc_ram_tt1", "vfc_ram", 0, 512 },
	{ "vfc_ram_mtt2", "vfc_ram", 512, 128 },
	{ "vfc_ram_stt2", "vfc_ram", 640, 32 },
	{ "vfc_ram_ro_vect", "vfc_ram", 672, 32 }
};
1729 
/* Big RAM definitions. Each entry names the RAM, its MEM/RAM memory
 * groups, the GRC param that gates dumping it, its BIG_RAM address/data
 * access registers, and a per-chip size (units not visible in this
 * chunk).
 */
static struct big_ram_defs s_big_ram_defs[] = {
	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
	  { 4800, 5632, 4416 } },

	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
	  { 2880, 3680, 2640 } },

	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
	  { 1152, 1152, 1152 } }
};
1740 
/* Reset register definitions, indexed by the DBG_RESET_REG_* values named
 * in the per-entry comments. Positional fields appear to be { register
 * address, per-chip existence flags, per-chip value } — field names not
 * visible here; TODO confirm against struct reset_reg_defs.
 */
static struct reset_reg_defs s_reset_regs_defs[] = {

	/* DBG_RESET_REG_MISCS_PL_UA */
	{ MISCS_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },

	/* DBG_RESET_REG_MISCS_PL_HV */
	{ MISCS_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x400, 0x600 } },

	/* DBG_RESET_REG_MISCS_PL_HV_2 */
	{ MISCS_REG_RESET_PL_HV_2_K2_E5, { false, true, true }, { 0x0, 0x0, 0x0 } },

	/* DBG_RESET_REG_MISC_PL_UA */
	{ MISC_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },

	/* DBG_RESET_REG_MISC_PL_HV */
	{ MISC_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x0, 0x0 } },

	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
	{ MISC_REG_RESET_PL_PDA_VMAIN_1, { true, true, true }, { 0x4404040, 0x4404040, 0x404040 } },

	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
	{ MISC_REG_RESET_PL_PDA_VMAIN_2, { true, true, true }, { 0x7, 0x7c00007, 0x5c08007 } },

	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
	{ MISC_REG_RESET_PL_PDA_VAUX, { true, true, true }, { 0x2, 0x2, 0x2 } },
};
1767 
/* PHY definitions. Per the register macros used, each entry holds the PHY
 * name, a base/CMU register, the TBUS address registers (bits 7:0 and
 * 15:8) and the TBUS data registers (bits 7:0 and 11:8) — field names
 * not visible here; confirm against struct phy_defs.
 */
static struct phy_defs s_phy_defs[] = {
	{ "nw_phy", NWS_REG_NWS_CMU_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 },
	{ "sgmii_phy", MS_REG_MS_CMU_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
	{ "pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
	{ "pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
};
1774 
/* The order of indexes that should be applied to a PCI buffer line when
 * copying it into a dump (dwords within each line are stored pair-swapped
 * by the HW, so adjacent dwords are exchanged on read-out).
 */
static const u8 s_pci_buf_line_ind[PCI_BUF_LINE_SIZE_IN_DWORDS] = { 1, 0, 3, 2, 5, 4, 7, 6 };
1777 
1778 /******************************** Variables **********************************/
1779 
/* The version of the calling app. Must be set to a non-zero value before
 * ecore_dbg_dev_init() succeeds (it returns DBG_STATUS_APP_VERSION_NOT_SET
 * otherwise).
 */
static u32 s_app_ver;
1782 
1783 /**************************** Private Functions ******************************/
1784 
/* Sanity checks that each static definition table matches the size of the
 * enum that indexes it (CHECK_ARR_SIZE is presumably a compile-time
 * assertion macro - a mismatch would mean out-of-bounds table lookups).
 */
static void ecore_static_asserts(void)
{
	CHECK_ARR_SIZE(s_dbg_arrays, MAX_BIN_DBG_BUFFER_TYPE);
	CHECK_ARR_SIZE(s_big_ram_defs, NUM_BIG_RAM_TYPES);
	CHECK_ARR_SIZE(s_vfc_ram_defs, NUM_VFC_RAM_TYPES);
	CHECK_ARR_SIZE(s_rss_mem_defs, NUM_RSS_MEM_TYPES);
	CHECK_ARR_SIZE(s_chip_defs, MAX_CHIP_IDS);
	CHECK_ARR_SIZE(s_platform_defs, MAX_PLATFORM_IDS);
	CHECK_ARR_SIZE(s_storm_defs, MAX_DBG_STORMS);
	CHECK_ARR_SIZE(s_constraint_op_defs, MAX_DBG_BUS_CONSTRAINT_OPS);
	CHECK_ARR_SIZE(s_dbg_target_names, MAX_DBG_BUS_TARGETS);
	CHECK_ARR_SIZE(s_storm_mode_defs, MAX_DBG_BUS_STORM_MODES);
	CHECK_ARR_SIZE(s_grc_param_defs, MAX_DBG_GRC_PARAMS);
	CHECK_ARR_SIZE(s_reset_regs_defs, MAX_DBG_RESET_REGS);
}
1800 
1801 /* Reads and returns a single dword from the specified unaligned buffer. */
1802 static u32 ecore_read_unaligned_dword(u8 *buf)
1803 {
1804 	u32 dword;
1805 
1806 	OSAL_MEMCPY((u8*)&dword, buf, sizeof(dword));
1807 	return dword;
1808 }
1809 
1810 /* Returns the difference in bytes between the specified physical addresses.
1811  * Assumes that the first address is bigger then the second, and that the
1812  * difference is a 32-bit value.
1813  */
1814 static u32 ecore_phys_addr_diff(struct dbg_bus_mem_addr *a,
1815 								struct dbg_bus_mem_addr *b)
1816 {
1817 	return a->hi == b->hi ? a->lo - b->lo : b->lo - a->lo;
1818 }
1819 
1820 /* Sets the value of the specified GRC param */
1821 static void ecore_grc_set_param(struct ecore_hwfn *p_hwfn,
1822 				 enum dbg_grc_params grc_param,
1823 				 u32 val)
1824 {
1825 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1826 
1827 	dev_data->grc.param_val[grc_param] = val;
1828 }
1829 
1830 /* Returns the value of the specified GRC param */
1831 static u32 ecore_grc_get_param(struct ecore_hwfn *p_hwfn,
1832 							   enum dbg_grc_params grc_param)
1833 {
1834 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1835 
1836 	return dev_data->grc.param_val[grc_param];
1837 }
1838 
1839 /* Initializes the GRC parameters */
1840 static void ecore_dbg_grc_init_params(struct ecore_hwfn *p_hwfn)
1841 {
1842 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1843 
1844 	if (!dev_data->grc.params_initialized) {
1845 		ecore_dbg_grc_set_params_default(p_hwfn);
1846 		dev_data->grc.params_initialized = 1;
1847 	}
1848 }
1849 
/* Initializes debug data for the specified device.
 *
 * Detects the chip (E5/K2/BB) and platform (ASIC/emulation/FPGA), records
 * the corresponding mode bits (used later for mode-tree evaluation), and
 * initializes the GRC dump parameters. Idempotent once it has succeeded.
 *
 * Returns DBG_STATUS_OK on success, DBG_STATUS_APP_VERSION_NOT_SET if the
 * calling app never registered its version, or DBG_STATUS_UNKNOWN_CHIP.
 */
static enum dbg_status ecore_dbg_dev_init(struct ecore_hwfn *p_hwfn,
										  struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	/* Already initialized - nothing to do */
	if (dev_data->initialized)
		return DBG_STATUS_OK;

	/* The calling app must set s_app_ver first */
	if (!s_app_ver)
		return DBG_STATUS_APP_VERSION_NOT_SET;

	/* Detect chip - checked newest to oldest */
	if (ECORE_IS_E5(p_hwfn->p_dev)) {
		dev_data->chip_id = CHIP_E5;
		dev_data->mode_enable[MODE_E5] = 1;
	}
	else if (ECORE_IS_K2(p_hwfn->p_dev)) {
		dev_data->chip_id = CHIP_K2;
		dev_data->mode_enable[MODE_K2] = 1;
	}
	else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
		dev_data->chip_id = CHIP_BB;
		dev_data->mode_enable[MODE_BB] = 1;
	}
	else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}

#ifdef ASIC_ONLY
	dev_data->platform_id = PLATFORM_ASIC;
	dev_data->mode_enable[MODE_ASIC] = 1;
#else
	/* Detect platform: ASIC, full/reduced emulation, or FPGA */
	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
		dev_data->platform_id = PLATFORM_ASIC;
		dev_data->mode_enable[MODE_ASIC] = 1;
	}
	else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		/* NOTE(review): bit 29 of MISCS_REG_ECO_RESERVED apparently
		 * distinguishes full from reduced emulation - confirm.
		 */
		if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED) & 0x20000000) {
			dev_data->platform_id = PLATFORM_EMUL_FULL;
			dev_data->mode_enable[MODE_EMUL_FULL] = 1;
		}
		else {
			dev_data->platform_id = PLATFORM_EMUL_REDUCED;
			dev_data->mode_enable[MODE_EMUL_REDUCED] = 1;
		}
	}
	else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		dev_data->platform_id = PLATFORM_FPGA;
		dev_data->mode_enable[MODE_FPGA] = 1;
	}
	else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}
#endif

	/* Initializes the GRC parameters */
	ecore_dbg_grc_init_params(p_hwfn);

	dev_data->initialized = true;

	return DBG_STATUS_OK;
}
1912 
1913 static struct dbg_bus_block* get_dbg_bus_block_desc(struct ecore_hwfn *p_hwfn,
1914 														  enum block_id block_id)
1915 {
1916 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1917 
1918 	return (struct dbg_bus_block*)&dbg_bus_blocks[block_id * MAX_CHIP_IDS + dev_data->chip_id];
1919 }
1920 
/* Returns the debug bus line descriptor for the line currently selected on
 * the given block, or OSAL_NULL for the signature line (line 0), the latency
 * line (line 1, on blocks that have latency events) and non-existing lines.
 */
static struct dbg_bus_line* get_dbg_bus_line_desc(struct ecore_hwfn *p_hwfn,
														enum block_id block_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_block_data *block_bus;
	struct dbg_bus_block *block_desc;

	block_bus = &dev_data->bus.blocks[block_id];
	block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);

	if (!block_bus->line_num ||
		(block_bus->line_num == 1 && block_desc->has_latency_events) ||
		block_bus->line_num >= NUM_DBG_LINES(block_desc))
		return OSAL_NULL;

	/* The dbg_bus_lines table doesn't contain entries for the extra
	 * (signature/latency) lines, hence the NUM_EXTRA_DBG_LINES offset.
	 */
	return (struct dbg_bus_line*)&dbg_bus_lines[block_desc->lines_offset + block_bus->line_num - NUM_EXTRA_DBG_LINES(block_desc)];
}
1939 
/* Reads the FW info structure for the specified Storm from the chip,
 * and writes it to the specified fw_info pointer.
 * If the location record read from the Storm RAM is invalid (size 0 or
 * larger than *fw_info), fw_info is left zeroed.
 */
static void ecore_read_fw_info(struct ecore_hwfn *p_hwfn,
							   struct ecore_ptt *p_ptt,
							   u8 storm_id,
							   struct fw_info *fw_info)
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	struct fw_info_location fw_info_location;
	u32 addr, i, *dest;

	OSAL_MEMSET(&fw_info_location, 0, sizeof(fw_info_location));
	OSAL_MEMSET(fw_info, 0, sizeof(*fw_info));

	/* Read first the address that points to fw_info location.
	 * The address is located in the last line of the Storm RAM.
	 */
	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - sizeof(fw_info_location);
	dest = (u32*)&fw_info_location;

	/* GRC reads are dword-sized, so copy the struct one dword at a time */
	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); i++, addr += BYTES_IN_DWORD)
		dest[i] = ecore_rd(p_hwfn, p_ptt, addr);

	/* Read FW version info from Storm RAM, after validating the recorded
	 * size to avoid overflowing *fw_info.
	 */
	if (fw_info_location.size > 0 && fw_info_location.size <= sizeof(*fw_info)) {
		addr = fw_info_location.grc_addr;
		dest = (u32*)fw_info;
		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); i++, addr += BYTES_IN_DWORD)
			dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
	}
}
1972 
1973 /* Dumps the specified string to the specified buffer.
1974  * Returns the dumped size in bytes.
1975  */
1976 static u32 ecore_dump_str(char *dump_buf,
1977 						  bool dump,
1978 						  const char *str)
1979 {
1980 	if (dump)
1981 		OSAL_STRCPY(dump_buf, str);
1982 
1983 	return (u32)OSAL_STRLEN(str) + 1;
1984 }
1985 
1986 /* Dumps zeros to align the specified buffer to dwords.
1987  * Returns the dumped size in bytes.
1988  */
1989 static u32 ecore_dump_align(char *dump_buf,
1990 							bool dump,
1991 							u32 byte_offset)
1992 {
1993 	u8 offset_in_dword, align_size;
1994 
1995 	offset_in_dword = (u8)(byte_offset & 0x3);
1996 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1997 
1998 	if (dump && align_size)
1999 		OSAL_MEMSET(dump_buf, 0, align_size);
2000 
2001 	return align_size;
2002 }
2003 
2004 /* Writes the specified string param to the specified buffer.
2005  * Returns the dumped size in dwords.
2006  */
2007 static u32 ecore_dump_str_param(u32 *dump_buf,
2008 								bool dump,
2009 								const char *param_name,
2010 								const char *param_val)
2011 {
2012 	char *char_buf = (char*)dump_buf;
2013 	u32 offset = 0;
2014 
2015 	/* Dump param name */
2016 	offset += ecore_dump_str(char_buf + offset, dump, param_name);
2017 
2018 	/* Indicate a string param value */
2019 	if (dump)
2020 		*(char_buf + offset) = 1;
2021 	offset++;
2022 
2023 	/* Dump param value */
2024 	offset += ecore_dump_str(char_buf + offset, dump, param_val);
2025 
2026 	/* Align buffer to next dword */
2027 	offset += ecore_dump_align(char_buf + offset, dump, offset);
2028 
2029 	return BYTES_TO_DWORDS(offset);
2030 }
2031 
/* Writes the specified numeric param to the specified buffer.
 * Layout: param name (NUL-terminated), a type byte of 0 (numeric value),
 * zero padding to the next dword, then the value as a single dword.
 * Returns the dumped size in dwords.
 */
static u32 ecore_dump_num_param(u32 *dump_buf,
								bool dump,
								const char *param_name,
								u32 param_val)
{
	char *char_buf = (char*)dump_buf;
	u32 offset = 0;	/* in bytes until converted below */

	/* Dump param name */
	offset += ecore_dump_str(char_buf + offset, dump, param_name);

	/* Indicate a numeric param value */
	if (dump)
		*(char_buf + offset) = 0;
	offset++;

	/* Align buffer to next dword - the value itself must be dword-aligned */
	offset += ecore_dump_align(char_buf + offset, dump, offset);

	/* Dump param value (and change offset from bytes to dwords) */
	offset = BYTES_TO_DWORDS(offset);
	if (dump)
		*(dump_buf + offset) = param_val;
	offset++;

	return offset;
}
2062 
/* Reads the FW version and writes it as a param to the specified buffer.
 * The version is read from the first Storm whose SEMI block is not in reset;
 * if none is found (or DBG_GRC_PARAM_NO_FW_VER is set), empty placeholder
 * strings are dumped instead.
 * Returns the dumped size in dwords.
 */
static u32 ecore_dump_fw_ver_param(struct ecore_hwfn *p_hwfn,
								   struct ecore_ptt *p_ptt,
								   u32 *dump_buf,
								   bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
	struct fw_info fw_info = { { 0 }, { 0 } };
	u32 offset = 0;

	if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		/* Read FW image/version from PRAM in a non-reset SEMI */
		bool found = false;
		u8 storm_id;

		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; storm_id++) {
			struct storm_defs *storm = &s_storm_defs[storm_id];

			/* Read FW version/image */
			if (dev_data->block_in_reset[storm->block_id])
				continue;

			/* Read FW info for the current Storm */
			ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

			/* Create FW version/image strings */
			if (OSAL_SNPRINTF(fw_ver_str, sizeof(fw_ver_str), "%d_%d_%d_%d", fw_info.ver.num.major, fw_info.ver.num.minor, fw_info.ver.num.rev, fw_info.ver.num.eng) < 0)
				DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid FW version string\n");
			switch (fw_info.ver.image_id) {
			case FW_IMG_KUKU: OSAL_STRCPY(fw_img_str, "kuku"); break;
			case FW_IMG_MAIN: OSAL_STRCPY(fw_img_str, "main"); break;
			case FW_IMG_L2B: OSAL_STRCPY(fw_img_str, "l2b"); break;
			default: OSAL_STRCPY(fw_img_str, "unknown"); break;
			}

			found = true;
		}
	}

	/* Dump FW version, image and timestamp */
	offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-version", fw_ver_str);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "fw-timestamp", fw_info.ver.timestamp);

	return offset;
}
2113 
/* Reads the MFW version and writes it as a param to the specified buffer.
 * Only attempted on ASIC (there is no MFW on emulation/FPGA platforms);
 * otherwise an empty placeholder version string is dumped.
 * Returns the dumped size in dwords.
 */
static u32 ecore_dump_mfw_ver_param(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt,
									u32 *dump_buf,
									bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;

	if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		u32 public_data_addr, global_section_offsize_addr, global_section_offsize, global_section_addr, mfw_ver;

		/* Find MCP public data GRC address. Needs to be ORed with
		 * MCP_REG_SCRATCH due to a HW bug.
		 */
		public_data_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR) | MCP_REG_SCRATCH;

		/* Find MCP public global section offset */
		global_section_offsize_addr = public_data_addr + OFFSETOF(struct mcp_public_data, sections) + sizeof(offsize_t) * PUBLIC_GLOBAL;
		global_section_offsize = ecore_rd(p_hwfn, p_ptt, global_section_offsize_addr);
		global_section_addr = MCP_REG_SCRATCH + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;

		/* Read MFW version from MCP public global section */
		mfw_ver = ecore_rd(p_hwfn, p_ptt, global_section_addr + OFFSETOF(struct public_global, mfw_ver));

		/* Dump MFW version param - the four version fields are packed
		 * into the dword, most significant byte first.
		 */
		if (OSAL_SNPRINTF(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
			DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid MFW version string\n");
	}

	return ecore_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
2148 
/* Writes a section header to the specified buffer. A section header is
 * encoded exactly like a numeric param: the section name followed by the
 * number of params in the section.
 * Returns the dumped size in dwords.
 */
static u32 ecore_dump_section_hdr(u32 *dump_buf,
								  bool dump,
								  const char *name,
								  u32 num_params)
{
	return ecore_dump_num_param(dump_buf, dump, name, num_params);
}
2159 
/* Writes the common global params to the specified buffer: FW/MFW/tools
 * versions, chip and platform names, and the PCI function number.
 * num_specific_global_params is the number of additional params the caller
 * will dump after this call (needed so the section header param count is
 * correct).
 * Returns the dumped size in dwords.
 */
static u32 ecore_dump_common_global_params(struct ecore_hwfn *p_hwfn,
										   struct ecore_ptt *p_ptt,
										   u32 *dump_buf,
										   bool dump,
										   u8 num_specific_global_params)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;
	u8 num_params;

	/* Dump global params section header */
	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params);

	/* Store params */
	offset += ecore_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += ecore_dump_mfw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "tools-version", TOOLS_VERSION);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "chip", s_chip_defs[dev_data->chip_id].name);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "platform", s_platform_defs[dev_data->platform_id].name);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id);

	return offset;
}
2187 
2188 /* Writes the "last" section (including CRC) to the specified buffer at the
2189  * given offset. Returns the dumped size in dwords.
2190  */
2191 static u32 ecore_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2192 {
2193 	u32 start_offset = offset;
2194 
2195 	/* Dump CRC section header */
2196 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2197 
2198 	/* Calculate CRC32 and add it to the dword after the "last" section */
2199 	if (dump)
2200 		*(dump_buf + offset) = ~OSAL_CRC32(0xffffffff, (u8*)dump_buf, DWORDS_TO_BYTES(offset));
2201 
2202 	offset++;
2203 
2204 	return offset - start_offset;
2205 }
2206 
/* Updates the cached per-block reset state (dev_data->block_in_reset) by
 * reading all reset registers that exist on the current chip.
 */
static void ecore_update_blocks_reset_state(struct ecore_hwfn *p_hwfn,
											struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
	u32 i;

	/* Read reset registers (skip registers absent on this chip -
	 * their cached value stays 0).
	 */
	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
			reg_val[i] = ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[i].addr);

	/* Check if blocks are in reset: a cleared bit in the reset register
	 * means the block is held in reset.
	 */
	for (i = 0; i < MAX_BLOCK_ID; i++) {
		struct block_defs *block = s_block_defs[i];

		dev_data->block_in_reset[i] = block->has_reset_bit && !(reg_val[block->reset_reg] & (1 << block->reset_bit_offset));
	}
}
2227 
2228 /* Enable / disable the Debug block */
2229 static void ecore_bus_enable_dbg_block(struct ecore_hwfn *p_hwfn,
2230 									   struct ecore_ptt *p_ptt,
2231 									   bool enable)
2232 {
2233 	ecore_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2234 }
2235 
/* Resets the Debug block by pulsing its reset bit: the bit is cleared
 * (asserting reset) and then the original register value is restored
 * (de-asserting it).
 */
static void ecore_bus_reset_dbg_block(struct ecore_hwfn *p_hwfn,
									  struct ecore_ptt *p_ptt)
{
	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];

	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
	old_reset_reg_val = ecore_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
	new_reset_reg_val = old_reset_reg_val & ~(1 << dbg_block->reset_bit_offset);

	/* Order matters: assert reset first, then restore */
	ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
	ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
}
2250 
/* Sets the Debug block framing mode (ratio of HW to Storm debug data in
 * each recorded cycle).
 */
static void ecore_bus_set_framing_mode(struct ecore_hwfn *p_hwfn,
									   struct ecore_ptt *p_ptt,
									   enum dbg_bus_frame_modes mode)
{
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
}
2257 
/* Enable / disable Debug Bus clients according to the specified mask,
 * one bit per client (1 = enable, 0 = disable).
 */
static void ecore_bus_enable_clients(struct ecore_hwfn *p_hwfn,
									 struct ecore_ptt *p_ptt,
									 u32 client_mask)
{
	ecore_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
2267 
/* Enables the specified Storm for Debug Bus. Assumes a valid Storm ID.
 * Configures the Storm's SEMI for fast or slow debug according to the
 * Storm mode, then programs the optional CID/EID filters.
 */
static void ecore_bus_enable_storm(struct ecore_hwfn *p_hwfn,
								   struct ecore_ptt *p_ptt,
								   enum dbg_storms storm_id,
								   enum dbg_bus_filter_types filter_type)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 base_addr, sem_filter_params = filter_type;
	struct dbg_bus_storm_data *storm_bus;
	struct storm_mode_defs *storm_mode;
	struct storm_defs *storm;

	storm = &s_storm_defs[storm_id];
	storm_bus = &dev_data->bus.storms[storm_id];
	storm_mode = &s_storm_mode_defs[storm_bus->mode];
	base_addr = storm->sem_fast_mem_addr;

	/* Config SEM */
	if (storm_mode->is_fast_dbg) {

		/* Enable fast debug */
		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST);
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_MODE, storm_mode->id_in_hw);
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 1);

		/* Enable all messages except STORE. Must be done after
		 * enabling SEM_FAST_REG_DEBUG_ACTIVE, otherwise messages will
		 * be dropped after the SEMI sync fifo is filled.
		 */
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_ENABLE);
	}
	else {

		/* Enable slow debug */
		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST);
		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 1);
		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode_addr, storm_mode->id_in_hw);
		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode1_conf_addr, SEM_SLOW_MODE1_DATA_ENABLE);
	}

	/* Config SEM cid filter */
	if (storm_bus->cid_filter_en) {
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_CID, storm_bus->cid);
		sem_filter_params |= SEM_FILTER_CID_EN_MASK;
	}

	/* Config SEM eid filter - either a min/max range or a value/mask pair */
	if (storm_bus->eid_filter_en) {
		const union dbg_bus_storm_eid_params *eid_filter = &storm_bus->eid_filter_params;

		if (storm_bus->eid_range_not_mask) {
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_STRT, eid_filter->range.min);
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_END, eid_filter->range.max);
			sem_filter_params |= SEM_FILTER_EID_RANGE_EN_MASK;
		}
		else {
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_EVENT_ID, eid_filter->mask.val);
			/* HW mask semantics are inverted relative to the API mask */
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_MASK, ~eid_filter->mask.mask);
			sem_filter_params |= SEM_FILTER_EID_MASK_EN_MASK;
		}
	}

	/* Config accumulated SEM filter parameters (if any) */
	if (sem_filter_params)
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, sem_filter_params);
}
2334 
2335 /* Disables Debug Bus block inputs */
2336 static enum dbg_status ecore_bus_disable_inputs(struct ecore_hwfn *p_hwfn,
2337 												struct ecore_ptt *p_ptt,
2338 												bool empty_semi_fifos)
2339 {
2340 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2341 	u8 storm_id, num_fifos_to_empty = MAX_DBG_STORMS;
2342 	bool is_fifo_empty[MAX_DBG_STORMS] = { false };
2343 	u32 block_id;
2344 
2345 	/* Disable messages output in all Storms */
2346 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2347 		struct storm_defs *storm = &s_storm_defs[storm_id];
2348 
2349 		if (!dev_data->block_in_reset[storm->block_id])
2350 			ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_DISABLE);
2351 	}
2352 
2353 	/* Try to empty the SEMI sync fifo. Must be done after messages output
2354 	 * were disabled in all Storms (i.e. SEM_FAST_REG_DBG_MODE6_SRC_DISABLE
2355 	 * was set to all 1's.
2356 	 */
2357 	while (num_fifos_to_empty) {
2358 		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2359 			struct storm_defs *storm = &s_storm_defs[storm_id];
2360 
2361 			if (is_fifo_empty[storm_id])
2362 				continue;
2363 
2364 			/* Check if sync fifo got empty */
2365 			if (dev_data->block_in_reset[storm->block_id] || ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr)) {
2366 				is_fifo_empty[storm_id] = true;
2367 				num_fifos_to_empty--;
2368 			}
2369 		}
2370 
2371 		/* Check if need to continue polling */
2372 		if (num_fifos_to_empty) {
2373 			u32 polling_ms = SEMI_SYNC_FIFO_POLLING_DELAY_MS * s_platform_defs[dev_data->platform_id].delay_factor;
2374 			u32 polling_count = 0;
2375 
2376 			if (empty_semi_fifos && polling_count < SEMI_SYNC_FIFO_POLLING_COUNT) {
2377 				OSAL_MSLEEP(polling_ms);
2378 				polling_count++;
2379 			}
2380 			else {
2381 				DP_NOTICE(p_hwfn, false, "Warning: failed to empty the SEMI sync FIFO. It means that the last few messages from the SEMI could not be sent to the DBG block. This can happen when the DBG block is blocked (e.g. due to a PCI problem).\n");
2382 				break;
2383 			}
2384 		}
2385 	}
2386 
2387 	/* Disable debug in all Storms */
2388 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2389 		struct storm_defs *storm = &s_storm_defs[storm_id];
2390 		u32 base_addr = storm->sem_fast_mem_addr;
2391 
2392 		if (dev_data->block_in_reset[storm->block_id])
2393 			continue;
2394 
2395 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 0);
2396 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2397 		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_FRAME_MODE_4HW_0ST);
2398 		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 0);
2399 	}
2400 
2401 	/* Disable all clients */
2402 	ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
2403 
2404 	/* Disable all blocks */
2405 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2406 		struct block_defs *block = s_block_defs[block_id];
2407 
2408 		if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS && !dev_data->block_in_reset[block_id])
2409 			ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
2410 	}
2411 
2412 	/* Disable timestamp */
2413 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, 0);
2414 
2415 	/* Disable filters and triggers */
2416 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2417 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 0);
2418 
2419 	return DBG_STATUS_OK;
2420 }
2421 
/* Sets a Debug Bus trigger/filter constraint by programming the constraint's
 * operation, data/mask, frame bit/mask, dword offset, range, cyclic and
 * "must" registers. The filter and trigger register files are parallel, so
 * each write picks its base register by is_filter and adds the same offset.
 */
static void ecore_bus_set_constraint(struct ecore_hwfn *p_hwfn,
									 struct ecore_ptt *p_ptt,
									 bool is_filter,
									 u8 constraint_id,
									 u8 hw_op_val,
									 u32 data_val,
									 u32 data_mask,
									 u8 frame_bit,
									 u8 frame_mask,
									 u16 dword_offset,
									 u16 range,
									 u8 cyclic_bit,
									 u8 must_bit)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_offset = constraint_id * BYTES_IN_DWORD;
	u8 curr_trigger_state;

	/* For trigger only - set register offset according to state
	 * (triggers have a constraint set per trigger state).
	 */
	if (!is_filter) {
		curr_trigger_state = dev_data->bus.next_trigger_state - 1;
		reg_offset += curr_trigger_state * TRIGGER_SETS_PER_STATE * MAX_CONSTRAINTS * BYTES_IN_DWORD;
	}

	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OPRTN_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0) + reg_offset, hw_op_val);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0) + reg_offset, data_val);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0) + reg_offset, data_mask);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0) + reg_offset, frame_bit);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0) + reg_offset, frame_mask);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OFFSET_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0) + reg_offset, dword_offset);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_RANGE_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0) + reg_offset, range);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_CYCLIC_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0) + reg_offset, cyclic_bit);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_MUST_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0) + reg_offset, must_bit);
}
2457 
/* Reads the specified DBG Bus internal buffer range (inclusive line range)
 * and copies it to the specified buffer. When !dump, only the size is
 * calculated. Returns the dumped size in dwords.
 */
static u32 ecore_bus_dump_int_buf_range(struct ecore_hwfn *p_hwfn,
										struct ecore_ptt *p_ptt,
										u32 *dump_buf,
										bool dump,
										u32 start_line,
										u32 end_line)
{
	u32 line, reg_addr, i, offset = 0;

	if (!dump)
		return (end_line - start_line + 1) * INT_BUF_LINE_SIZE_IN_DWORDS;

	/* Dwords within each line are stored in reverse order, hence the
	 * "LINE_SIZE - 1 - i" destination index.
	 */
	for (line = start_line, reg_addr = DBG_REG_INTR_BUFFER + DWORDS_TO_BYTES(start_line * INT_BUF_LINE_SIZE_IN_DWORDS);
		line <= end_line;
		line++, offset += INT_BUF_LINE_SIZE_IN_DWORDS)
		for (i = 0; i < INT_BUF_LINE_SIZE_IN_DWORDS; i++, reg_addr += BYTES_IN_DWORD)
			dump_buf[offset + INT_BUF_LINE_SIZE_IN_DWORDS - 1 - i] = ecore_rd(p_hwfn, p_ptt, reg_addr);

	return offset;
}
2481 
/* Reads the DBG Bus internal buffer and copies its contents to a buffer in
 * chronological order (handling buffer wrap-around).
 * Returns the dumped size in dwords.
 */
static u32 ecore_bus_dump_int_buf(struct ecore_hwfn *p_hwfn,
								  struct ecore_ptt *p_ptt,
								  u32 *dump_buf,
								  bool dump)
{
	u32 last_written_line, offset = 0;

	last_written_line = ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_WR_PTR);

	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_INT_BUFFER)) {

		/* Internal buffer was wrapped: first dump from write pointer
		 * to buffer end, then dump from buffer start to write pointer.
		 */
		if (last_written_line < INT_BUF_NUM_OF_LINES - 1)
			offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, last_written_line + 1, INT_BUF_NUM_OF_LINES - 1);
		offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
	}
	else if (last_written_line) {

		/* Internal buffer wasn't wrapped: dump from buffer start until
		 *  write pointer.
		 */
		if (!ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_RD_PTR))
			offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
		else
			DP_NOTICE(p_hwfn, true, "Unexpected Debug Bus error: internal buffer read pointer is not zero\n");
	}

	return offset;
}
2516 
/* Reads the specified DBG Bus PCI buffer range (inclusive line range) and
 * copies it to the specified buffer, reordering the dwords in each line per
 * s_pci_buf_line_ind. When !dump, only the size is calculated.
 * Returns the dumped size in dwords.
 */
static u32 ecore_bus_dump_pci_buf_range(struct ecore_hwfn *p_hwfn,
										u32 *dump_buf,
										bool dump,
										u32 start_line,
										u32 end_line)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;

	/* Extract PCI buffer pointer from virtual address. The 64-bit virtual
	 * address is stored as a lo/hi dword pair; reading it through a u64
	 * pointer reconstructs it.
	 */
	void *virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
	u32 *pci_buf_start = (u32*)(osal_uintptr_t)*((u64*)virt_addr_lo);
	u32 *pci_buf, line, i;

	if (!dump)
		return (end_line - start_line + 1) * PCI_BUF_LINE_SIZE_IN_DWORDS;

	for (line = start_line, pci_buf = pci_buf_start + start_line * PCI_BUF_LINE_SIZE_IN_DWORDS;
	line <= end_line;
		line++, offset += PCI_BUF_LINE_SIZE_IN_DWORDS)
		for (i = 0; i < PCI_BUF_LINE_SIZE_IN_DWORDS; i++, pci_buf++)
			dump_buf[offset + s_pci_buf_line_ind[i]] = *pci_buf;

	return offset;
}
2545 
/* Copies the DBG Bus PCI buffer to the specified buffer in chronological
 * order (handling buffer wrap-around). Returns the dumped size in dwords,
 * or 0 if the HW write pointer is inconsistent with the buffer bounds.
 */
static u32 ecore_bus_dump_pci_buf(struct ecore_hwfn *p_hwfn,
								  struct ecore_ptt *p_ptt,
								  u32 *dump_buf,
								  bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 next_wr_byte_offset, next_wr_line_offset;
	struct dbg_bus_mem_addr next_wr_phys_addr;
	u32 pci_buf_size_in_lines, offset = 0;

	pci_buf_size_in_lines = dev_data->bus.pci_buf.size / PCI_BUF_LINE_SIZE_IN_BYTES;

	/* Extract write pointer (physical address) */
	next_wr_phys_addr.lo = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR);
	next_wr_phys_addr.hi = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR + BYTES_IN_DWORD);

	/* Convert write pointer to offset; sanity-check that it is
	 * line-aligned and within the buffer.
	 */
	next_wr_byte_offset = ecore_phys_addr_diff(&next_wr_phys_addr, &dev_data->bus.pci_buf.phys_addr);
	if ((next_wr_byte_offset % PCI_BUF_LINE_SIZE_IN_BYTES) || next_wr_byte_offset > dev_data->bus.pci_buf.size)
		return 0;
	next_wr_line_offset = next_wr_byte_offset / PCI_BUF_LINE_SIZE_IN_BYTES;

	/* PCI buffer wrapped: first dump from write pointer to buffer end. */
	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_EXT_BUFFER))
		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, next_wr_line_offset, pci_buf_size_in_lines - 1);

	/* Dump from buffer start until write pointer */
	if (next_wr_line_offset)
		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, 0, next_wr_line_offset - 1);

	return offset;
}
2581 
2582 /* Copies the DBG Bus recorded data to the specified buffer.
2583  * Returns the dumped size in dwords.
2584  */
2585 static u32 ecore_bus_dump_data(struct ecore_hwfn *p_hwfn,
2586 							   struct ecore_ptt *p_ptt,
2587 							   u32 *dump_buf,
2588 							   bool dump)
2589 {
2590 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2591 
2592 	switch (dev_data->bus.target) {
2593 	case DBG_BUS_TARGET_ID_INT_BUF:
2594 		return ecore_bus_dump_int_buf(p_hwfn, p_ptt, dump_buf, dump);
2595 	case DBG_BUS_TARGET_ID_PCI:
2596 		return ecore_bus_dump_pci_buf(p_hwfn, p_ptt, dump_buf, dump);
2597 	default:
2598 		break;
2599 	}
2600 
2601 	return 0;
2602 }
2603 
2604 /* Frees the Debug Bus PCI buffer */
2605 static void ecore_bus_free_pci_buf(struct ecore_hwfn *p_hwfn)
2606 {
2607 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2608 	dma_addr_t pci_buf_phys_addr;
2609 	void *virt_addr_lo;
2610 	u32 *pci_buf;
2611 
2612 	/* Extract PCI buffer pointer from virtual address */
2613 	virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2614 	pci_buf = (u32*)(osal_uintptr_t)*((u64*)virt_addr_lo);
2615 
2616 	if (!dev_data->bus.pci_buf.size)
2617 		return;
2618 
2619 	OSAL_MEMCPY(&pci_buf_phys_addr, &dev_data->bus.pci_buf.phys_addr, sizeof(pci_buf_phys_addr));
2620 
2621 	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, pci_buf, pci_buf_phys_addr, dev_data->bus.pci_buf.size);
2622 
2623 	dev_data->bus.pci_buf.size = 0;
2624 }
2625 
2626 /* Dumps the list of DBG Bus inputs (blocks/Storms) to the specified buffer.
2627  * Returns the dumped size in dwords.
2628  */
2629 static u32 ecore_bus_dump_inputs(struct ecore_hwfn *p_hwfn,
2630 								 u32 *dump_buf,
2631 								 bool dump)
2632 {
2633 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2634 	char storm_name[8] = "?storm";
2635 	u32 block_id, offset = 0;
2636 	u8 storm_id;
2637 
2638 	/* Store storms */
2639 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2640 		struct dbg_bus_storm_data *storm_bus = &dev_data->bus.storms[storm_id];
2641 		struct storm_defs *storm = &s_storm_defs[storm_id];
2642 
2643 		if (!dev_data->bus.storms[storm_id].enabled)
2644 			continue;
2645 
2646 		/* Dump section header */
2647 		storm_name[0] = storm->letter;
2648 		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 3);
2649 		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", storm_name);
2650 		offset += ecore_dump_num_param(dump_buf + offset, dump, "id", storm_bus->hw_id);
2651 		offset += ecore_dump_str_param(dump_buf + offset, dump, "mode", s_storm_mode_defs[storm_bus->mode].name);
2652 	}
2653 
2654 	/* Store blocks */
2655 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2656 		struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
2657 		struct block_defs *block = s_block_defs[block_id];
2658 
2659 		if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
2660 			continue;
2661 
2662 		/* Dump section header */
2663 		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 4);
2664 		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", block->name);
2665 		offset += ecore_dump_num_param(dump_buf + offset, dump, "line", block_bus->line_num);
2666 		offset += ecore_dump_num_param(dump_buf + offset, dump, "en", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK));
2667 		offset += ecore_dump_num_param(dump_buf + offset, dump, "shr", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
2668 	}
2669 
2670 	return offset;
2671 }
2672 
2673 /* Dumps the Debug Bus header (params, inputs, data header) to the specified
2674  * buffer. Returns the dumped size in dwords.
2675  */
2676 static u32 ecore_bus_dump_hdr(struct ecore_hwfn *p_hwfn,
2677 							  struct ecore_ptt *p_ptt,
2678 							  u32 *dump_buf,
2679 							  bool dump)
2680 {
2681 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2682 	char hw_id_mask_str[16];
2683 	u32 offset = 0;
2684 
2685 	if (OSAL_SNPRINTF(hw_id_mask_str, sizeof(hw_id_mask_str), "0x%x", dev_data->bus.hw_id_mask) < 0)
2686 		DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid HW ID mask\n");
2687 
2688 	/* Dump global params */
2689 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 5);
2690 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "debug-bus");
2691 	offset += ecore_dump_str_param(dump_buf + offset, dump, "wrap-mode", dev_data->bus.one_shot_en ? "one-shot" : "wrap-around");
2692 	offset += ecore_dump_num_param(dump_buf + offset, dump, "hw-dwords", dev_data->bus.hw_dwords);
2693 	offset += ecore_dump_str_param(dump_buf + offset, dump, "hw-id-mask", hw_id_mask_str);
2694 	offset += ecore_dump_str_param(dump_buf + offset, dump, "target", s_dbg_target_names[dev_data->bus.target]);
2695 
2696 	offset += ecore_bus_dump_inputs(p_hwfn, dump_buf + offset, dump);
2697 
2698 	if (dev_data->bus.target != DBG_BUS_TARGET_ID_NIG) {
2699 		u32 recorded_dwords = 0;
2700 
2701 		if (dump)
2702 			recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, OSAL_NULL, false);
2703 
2704 		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_data", 1);
2705 		offset += ecore_dump_num_param(dump_buf + offset, dump, "size", recorded_dwords);
2706 	}
2707 
2708 	return offset;
2709 }
2710 
2711 static bool ecore_is_mode_match(struct ecore_hwfn *p_hwfn,
2712 								u16 *modes_buf_offset)
2713 {
2714 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2715 	bool arg1, arg2;
2716 	u8 tree_val;
2717 
2718 	/* Get next element from modes tree buffer */
2719 	tree_val = ((u8*)s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr)[(*modes_buf_offset)++];
2720 
2721 	switch (tree_val) {
2722 	case INIT_MODE_OP_NOT:
2723 		return !ecore_is_mode_match(p_hwfn, modes_buf_offset);
2724 	case INIT_MODE_OP_OR:
2725 	case INIT_MODE_OP_AND:
2726 		arg1 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2727 		arg2 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2728 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 || arg2) : (arg1 && arg2);
2729 	default: return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2730 	}
2731 }
2732 
2733 /* Returns true if the specified entity (indicated by GRC param) should be
2734  * included in the dump, false otherwise.
2735  */
2736 static bool ecore_grc_is_included(struct ecore_hwfn *p_hwfn,
2737 								  enum dbg_grc_params grc_param)
2738 {
2739 	return ecore_grc_get_param(p_hwfn, grc_param) > 0;
2740 }
2741 
2742 /* Returns true of the specified Storm should be included in the dump, false
2743  * otherwise.
2744  */
2745 static bool ecore_grc_is_storm_included(struct ecore_hwfn *p_hwfn,
2746 										enum dbg_storms storm)
2747 {
2748 	return ecore_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2749 }
2750 
2751 /* Returns true if the specified memory should be included in the dump, false
2752  * otherwise.
2753  */
2754 static bool ecore_grc_is_mem_included(struct ecore_hwfn *p_hwfn,
2755 									  enum block_id block_id,
2756 									  u8 mem_group_id)
2757 {
2758 	struct block_defs *block = s_block_defs[block_id];
2759 	u8 i;
2760 
2761 	/* Check Storm match */
2762 	if (block->associated_to_storm &&
2763 		!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)block->storm_id))
2764 		return false;
2765 
2766 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2767 		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2768 
2769 		if (mem_group_id == big_ram->mem_group_id || mem_group_id == big_ram->ram_mem_group_id)
2770 			return ecore_grc_is_included(p_hwfn, big_ram->grc_param);
2771 	}
2772 
2773 	switch (mem_group_id) {
2774 	case MEM_GROUP_PXP_ILT:
2775 	case MEM_GROUP_PXP_MEM:
2776 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2777 	case MEM_GROUP_RAM:
2778 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2779 	case MEM_GROUP_PBUF:
2780 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2781 	case MEM_GROUP_CAU_MEM:
2782 	case MEM_GROUP_CAU_SB:
2783 	case MEM_GROUP_CAU_PI:
2784 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2785 	case MEM_GROUP_QM_MEM:
2786 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2787 	case MEM_GROUP_CFC_MEM:
2788 	case MEM_GROUP_CONN_CFC_MEM:
2789 	case MEM_GROUP_TASK_CFC_MEM:
2790 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2791 	case MEM_GROUP_IGU_MEM:
2792 	case MEM_GROUP_IGU_MSIX:
2793 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2794 	case MEM_GROUP_MULD_MEM:
2795 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2796 	case MEM_GROUP_PRS_MEM:
2797 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2798 	case MEM_GROUP_DMAE_MEM:
2799 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2800 	case MEM_GROUP_TM_MEM:
2801 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2802 	case MEM_GROUP_SDM_MEM:
2803 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2804 	case MEM_GROUP_TDIF_CTX:
2805 	case MEM_GROUP_RDIF_CTX:
2806 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2807 	case MEM_GROUP_CM_MEM:
2808 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2809 	case MEM_GROUP_IOR:
2810 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2811 	default:
2812 		return true;
2813 	}
2814 }
2815 
2816 /* Stalls all Storms */
2817 static void ecore_grc_stall_storms(struct ecore_hwfn *p_hwfn,
2818 								   struct ecore_ptt *p_ptt,
2819 								   bool stall)
2820 {
2821 	u32 reg_addr;
2822 	u8 storm_id;
2823 
2824 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2825 		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
2826 			continue;
2827 
2828 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + SEM_FAST_REG_STALL_0_BB_K2;
2829 		ecore_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2830 	}
2831 
2832 	OSAL_MSLEEP(STALL_DELAY_MS);
2833 }
2834 
2835 /* Takes all blocks out of reset */
2836 static void ecore_grc_unreset_blocks(struct ecore_hwfn *p_hwfn,
2837 									 struct ecore_ptt *p_ptt)
2838 {
2839 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2840 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2841 	u32 block_id, i;
2842 
2843 	/* Fill reset regs values */
2844 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2845 		struct block_defs *block = s_block_defs[block_id];
2846 
2847 		if (block->exists[dev_data->chip_id] && block->has_reset_bit && block->unreset)
2848 			reg_val[block->reset_reg] |= (1 << block->reset_bit_offset);
2849 	}
2850 
2851 	/* Write reset registers */
2852 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2853 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2854 			continue;
2855 
2856 		reg_val[i] |= s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2857 
2858 		if (reg_val[i])
2859 			ecore_wr(p_hwfn, p_ptt, s_reset_regs_defs[i].addr + RESET_REG_UNRESET_OFFSET, reg_val[i]);
2860 	}
2861 }
2862 
2863 /* Returns the attention block data of the specified block */
2864 static const struct dbg_attn_block_type_data* ecore_get_block_attn_data(enum block_id block_id,
2865 																		enum dbg_attn_type attn_type)
2866 {
2867 	const struct dbg_attn_block *base_attn_block_arr = (const struct dbg_attn_block*)s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2868 
2869 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2870 }
2871 
2872 /* Returns the attention registers of the specified block */
2873 static const struct dbg_attn_reg* ecore_get_block_attn_regs(enum block_id block_id,
2874 															enum dbg_attn_type attn_type,
2875 															u8 *num_attn_regs)
2876 {
2877 	const struct dbg_attn_block_type_data *block_type_data = ecore_get_block_attn_data(block_id, attn_type);
2878 
2879 	*num_attn_regs = block_type_data->num_regs;
2880 
2881 	return &((const struct dbg_attn_reg*)s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->regs_offset];
2882 }
2883 
2884 /* For each block, clear the status of all parities */
2885 static void ecore_grc_clear_all_prty(struct ecore_hwfn *p_hwfn,
2886 									 struct ecore_ptt *p_ptt)
2887 {
2888 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2889 	const struct dbg_attn_reg *attn_reg_arr;
2890 	u8 reg_idx, num_attn_regs;
2891 	u32 block_id;
2892 
2893 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2894 		if (dev_data->block_in_reset[block_id])
2895 			continue;
2896 
2897 		attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
2898 
2899 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2900 			const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
2901 			u16 modes_buf_offset;
2902 			bool eval_mode;
2903 
2904 			/* Check mode */
2905 			eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
2906 			modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
2907 
2908 			/* If Mode match: clear parity status */
2909 			if (!eval_mode || ecore_is_mode_match(p_hwfn, &modes_buf_offset))
2910 				ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->sts_clr_address));
2911 		}
2912 	}
2913 }
2914 
2915 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2916  * the following parameters are dumped:
2917  * - count:	 no. of dumped entries
2918  * - split:	 split type
2919  * - id:	 split ID (dumped only if split_id >= 0)
2920  * - param_name: user parameter value (dumped only if param_name != OSAL_NULL
2921  *		 and param_val != OSAL_NULL).
2922  */
2923 static u32 ecore_grc_dump_regs_hdr(u32 *dump_buf,
2924 								   bool dump,
2925 								   u32 num_reg_entries,
2926 								   const char *split_type,
2927 								   int split_id,
2928 								   const char *param_name,
2929 								   const char *param_val)
2930 {
2931 	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2932 	u32 offset = 0;
2933 
2934 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_regs", num_params);
2935 	offset += ecore_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries);
2936 	offset += ecore_dump_str_param(dump_buf + offset, dump, "split", split_type);
2937 	if (split_id >= 0)
2938 		offset += ecore_dump_num_param(dump_buf + offset, dump, "id", split_id);
2939 	if (param_name && param_val)
2940 		offset += ecore_dump_str_param(dump_buf + offset, dump, param_name, param_val);
2941 
2942 	return offset;
2943 }
2944 
2945 /* Dumps the GRC registers in the specified address range.
2946  * Returns the dumped size in dwords.
2947  * The addr and len arguments are specified in dwords.
2948  */
2949 static u32 ecore_grc_dump_addr_range(struct ecore_hwfn *p_hwfn,
2950 				     struct ecore_ptt *p_ptt,
2951 				     u32 *dump_buf,
2952 				     bool dump,
2953 				     u32 addr,
2954 				     u32 len,
2955 				     bool OSAL_UNUSED wide_bus)
2956 {
2957 	u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
2958 
2959 	if (!dump)
2960 		return len;
2961 
2962 	for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
2963 		*(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, byte_addr);
2964 
2965 	return offset;
2966 }
2967 
2968 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2969  * The addr and len arguments are specified in dwords.
2970  */
2971 static u32 ecore_grc_dump_reg_entry_hdr(u32 *dump_buf,
2972 										bool dump,
2973 										u32 addr,
2974 										u32 len)
2975 {
2976 	if (dump)
2977 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2978 
2979 	return 1;
2980 }
2981 
2982 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2983  * The addr and len arguments are specified in dwords.
2984  */
2985 static u32 ecore_grc_dump_reg_entry(struct ecore_hwfn *p_hwfn,
2986 				    struct ecore_ptt *p_ptt,
2987 				    u32 *dump_buf,
2988 				    bool dump,
2989 				    u32 addr,
2990 				    u32 len,
2991 				    bool OSAL_UNUSED wide_bus)
2992 {
2993 	u32 offset = 0;
2994 
2995 	offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2996 	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
2997 
2998 	return offset;
2999 }
3000 
3001 /* Dumps GRC registers sequence with skip cycle.
3002  * Returns the dumped size in dwords.
3003  * - addr:	start GRC address in dwords
3004  * - total_len:	total no. of dwords to dump
3005  * - read_len:	no. consecutive dwords to read
3006  * - skip_len:	no. of dwords to skip (and fill with zeros)
3007  */
3008 static u32 ecore_grc_dump_reg_entry_skip(struct ecore_hwfn *p_hwfn,
3009 										 struct ecore_ptt *p_ptt,
3010 										 u32 *dump_buf,
3011 										 bool dump,
3012 										 u32 addr,
3013 										 u32 total_len,
3014 										 u32 read_len,
3015 										 u32 skip_len)
3016 {
3017 	u32 offset = 0, reg_offset = 0;
3018 
3019 	offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
3020 
3021 	if (!dump)
3022 		return offset + total_len;
3023 
3024 	while (reg_offset < total_len) {
3025 		u32 curr_len = OSAL_MIN_T(u32, read_len, total_len - reg_offset);
3026 
3027 		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, curr_len, false);
3028 		reg_offset += curr_len;
3029 		addr += curr_len;
3030 
3031 		if (reg_offset < total_len) {
3032 			curr_len = OSAL_MIN_T(u32, skip_len, total_len - skip_len);
3033 			OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
3034 			offset += curr_len;
3035 			reg_offset += curr_len;
3036 			addr += curr_len;
3037 		}
3038 	}
3039 
3040 	return offset;
3041 }
3042 
3043 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
3044 static u32 ecore_grc_dump_regs_entries(struct ecore_hwfn *p_hwfn,
3045 									   struct ecore_ptt *p_ptt,
3046 									   struct dbg_array input_regs_arr,
3047 									   u32 *dump_buf,
3048 									   bool dump,
3049 									   bool block_enable[MAX_BLOCK_ID],
3050 									   u32 *num_dumped_reg_entries)
3051 {
3052 	u32 i, offset = 0, input_offset = 0;
3053 	bool mode_match = true;
3054 
3055 	*num_dumped_reg_entries = 0;
3056 
3057 	while (input_offset < input_regs_arr.size_in_dwords) {
3058 		const struct dbg_dump_cond_hdr* cond_hdr = (const struct dbg_dump_cond_hdr*)&input_regs_arr.ptr[input_offset++];
3059 		u16 modes_buf_offset;
3060 		bool eval_mode;
3061 
3062 		/* Check mode/block */
3063 		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3064 		if (eval_mode) {
3065 			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3066 			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
3067 		}
3068 
3069 		if (!mode_match || !block_enable[cond_hdr->block_id]) {
3070 			input_offset += cond_hdr->data_size;
3071 			continue;
3072 		}
3073 
3074 		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
3075 			const struct dbg_dump_reg *reg = (const struct dbg_dump_reg*)&input_regs_arr.ptr[input_offset];
3076 
3077 			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3078 							   GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS),
3079 							   GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH),
3080 							   GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS));
3081 			(*num_dumped_reg_entries)++;
3082 		}
3083 	}
3084 
3085 	return offset;
3086 }
3087 
3088 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
3089 static u32 ecore_grc_dump_split_data(struct ecore_hwfn *p_hwfn,
3090 									 struct ecore_ptt *p_ptt,
3091 									 struct dbg_array input_regs_arr,
3092 									 u32 *dump_buf,
3093 									 bool dump,
3094 									 bool block_enable[MAX_BLOCK_ID],
3095 									 const char *split_type_name,
3096 									 u32 split_id,
3097 									 const char *param_name,
3098 									 const char *param_val)
3099 {
3100 	u32 num_dumped_reg_entries, offset;
3101 
3102 	/* Calculate register dump header size (and skip it for now) */
3103 	offset = ecore_grc_dump_regs_hdr(dump_buf, false, 0, split_type_name, split_id, param_name, param_val);
3104 
3105 	/* Dump registers */
3106 	offset += ecore_grc_dump_regs_entries(p_hwfn, p_ptt, input_regs_arr, dump_buf + offset, dump, block_enable, &num_dumped_reg_entries);
3107 
3108 	/* Write register dump header */
3109 	if (dump && num_dumped_reg_entries > 0)
3110 		ecore_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, split_type_name, split_id, param_name, param_val);
3111 
3112 	return num_dumped_reg_entries > 0 ? offset : 0;
3113 }
3114 
/* Dumps registers according to the input registers array. Returns the dumped
 * size in dwords.
 * The input (BIN_BUF_DBG_DUMP_REG) is a sequence of split headers, each
 * followed by split_data_size dwords of register entries. Per-port/PF/VF
 * splits are dumped once per instance, using HW "pretend" to address each
 * instance; the original PF pretend is restored before returning.
 */
static u32 ecore_grc_dump_registers(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt,
									u32 *dump_buf,
									bool dump,
									bool block_enable[MAX_BLOCK_ID],
									const char *param_name,
									const char *param_val)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct chip_platform_defs *chip_platform;
	u32 offset = 0, input_offset = 0;
	u8 port_id, pf_id, vf_id;

	/* Per-chip/platform instance counts (num_ports/num_pfs/num_vfs) */
	chip_platform = &s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id];

	if (dump)
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping registers...\n");

	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct dbg_array curr_input_regs_arr;
		u32 split_data_size;
		u8 split_type_id;

		/* Parse the split header and locate its register entries */
		split_hdr = (const struct dbg_dump_split_hdr*)&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
		split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_regs_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
		curr_input_regs_arr.size_in_dwords = split_data_size;

		switch(split_type_id) {
		case SPLIT_TYPE_NONE:
			/* Engine-wide registers: dumped once, split ID -1 */
			offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "eng", (u32)(-1), param_name, param_val);
			break;

		case SPLIT_TYPE_PORT:
			/* Per-port registers: pretend to each port in turn */
			for (port_id = 0; port_id < chip_platform->num_ports; port_id++) {
				if (dump)
					ecore_port_pretend(p_hwfn, p_ptt, port_id);
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "port", port_id, param_name, param_val);
			}
			break;

		case SPLIT_TYPE_PF:
		case SPLIT_TYPE_PORT_PF:
			/* Per-PF registers: pretend to each PF's concrete FID */
			for (pf_id = 0; pf_id < chip_platform->num_pfs; pf_id++) {
				if (dump)
					ecore_fid_pretend(p_hwfn, p_ptt, (pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "pf", pf_id, param_name, param_val);
			}
			break;

		case SPLIT_TYPE_VF:
			/* Per-VF registers: pretend with the VF-valid bit set */
			for (vf_id = 0; vf_id < chip_platform->num_vfs; vf_id++) {
				if (dump)
					ecore_fid_pretend(p_hwfn, p_ptt, (1 << PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT));
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "vf", vf_id, param_name, param_val);
			}
			break;

		default:
			break;
		}

		input_offset += split_data_size;
	}

	/* Pretend to original PF */
	if (dump)
		ecore_fid_pretend(p_hwfn, p_ptt, (p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));

	return offset;
}
3191 
3192 /* Dump reset registers. Returns the dumped size in dwords. */
3193 static u32 ecore_grc_dump_reset_regs(struct ecore_hwfn *p_hwfn,
3194 	struct ecore_ptt *p_ptt,
3195 	u32 *dump_buf,
3196 	bool dump)
3197 {
3198 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3199 	u32 i, offset = 0, num_regs = 0;
3200 
3201 	/* Calculate header size */
3202 	offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3203 
3204 	/* Write reset registers */
3205 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
3206 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
3207 			continue;
3208 
3209 		offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(s_reset_regs_defs[i].addr), 1, false);
3210 		num_regs++;
3211 	}
3212 
3213 	/* Write header */
3214 	if (dump)
3215 		ecore_grc_dump_regs_hdr(dump_buf, true, num_regs, "eng", -1, OSAL_NULL, OSAL_NULL);
3216 
3217 	return offset;
3218 }
3219 
3220 /* Dump registers that are modified during GRC Dump and therefore must be
3221  * dumped first. Returns the dumped size in dwords.
3222  */
3223 static u32 ecore_grc_dump_modified_regs(struct ecore_hwfn *p_hwfn,
3224 										struct ecore_ptt *p_ptt,
3225 										u32 *dump_buf,
3226 										bool dump)
3227 {
3228 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3229 	u32 block_id, offset = 0, num_reg_entries = 0;
3230 	const struct dbg_attn_reg *attn_reg_arr;
3231 	u8 storm_id, reg_idx, num_attn_regs;
3232 
3233 	/* Calculate header size */
3234 	offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3235 
3236 	/* Write parity registers */
3237 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3238 		if (dev_data->block_in_reset[block_id] && dump)
3239 			continue;
3240 
3241 		attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
3242 
3243 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
3244 			const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
3245 			u16 modes_buf_offset;
3246 			bool eval_mode;
3247 
3248 			/* Check mode */
3249 			eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3250 			modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3251 			if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
3252 				continue;
3253 
3254 			/* Mode match: read & dump registers */
3255 			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, reg_data->mask_address, 1, false);
3256 			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS), 1, false);
3257 			num_reg_entries += 2;
3258 		}
3259 	}
3260 
3261 	/* Write Storm stall status registers */
3262 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3263 		struct storm_defs *storm = &s_storm_defs[storm_id];
3264 
3265 		if (dev_data->block_in_reset[storm->block_id] && dump)
3266 			continue;
3267 
3268 		offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3269 			BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STALLED), 1, false);
3270 		num_reg_entries++;
3271 	}
3272 
3273 	/* Write header */
3274 	if (dump)
3275 		ecore_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, "eng", -1, OSAL_NULL, OSAL_NULL);
3276 
3277 	return offset;
3278 }
3279 
3280 /* Dumps registers that can't be represented in the debug arrays */
3281 static u32 ecore_grc_dump_special_regs(struct ecore_hwfn *p_hwfn,
3282 									   struct ecore_ptt *p_ptt,
3283 									   u32 *dump_buf,
3284 									   bool dump)
3285 {
3286 	u32 offset = 0;
3287 
3288 	offset += ecore_grc_dump_regs_hdr(dump_buf, dump, 2, "eng", -1, OSAL_NULL, OSAL_NULL);
3289 
3290 	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
3291 	 * skipped).
3292 	 */
3293 	offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO), RDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3294 	offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO), TDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3295 
3296 	return offset;
3297 }
3298 
3299 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3300  * dwords. The following parameters are dumped:
3301  * - name:	   dumped only if it's not OSAL_NULL.
3302  * - addr:	   in dwords, dumped only if name is OSAL_NULL.
3303  * - len:	   in dwords, always dumped.
3304  * - width:	   dumped if it's not zero.
3305  * - packed:	   dumped only if it's not false.
3306  * - mem_group:	   always dumped.
3307  * - is_storm:	   true only if the memory is related to a Storm.
3308  * - storm_letter: valid only if is_storm is true.
3309  *
3310  */
3311 static u32 ecore_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
3312 								  u32 *dump_buf,
3313 								  bool dump,
3314 								  const char *name,
3315 								  u32 addr,
3316 								  u32 len,
3317 								  u32 bit_width,
3318 								  bool packed,
3319 								  const char *mem_group,
3320 								  bool is_storm,
3321 								  char storm_letter)
3322 {
3323 	u8 num_params = 3;
3324 	u32 offset = 0;
3325 	char buf[64];
3326 
3327 	if (!len)
3328 		DP_NOTICE(p_hwfn, true, "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3329 
3330 	if (bit_width)
3331 		num_params++;
3332 	if (packed)
3333 		num_params++;
3334 
3335 	/* Dump section header */
3336 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params);
3337 
3338 	if (name) {
3339 
3340 		/* Dump name */
3341 		if (is_storm) {
3342 			OSAL_STRCPY(buf, "?STORM_");
3343 			buf[0] = storm_letter;
3344 			OSAL_STRCPY(buf + OSAL_STRLEN(buf), name);
3345 		}
3346 		else {
3347 			OSAL_STRCPY(buf, name);
3348 		}
3349 
3350 		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", buf);
3351 		if (dump)
3352 			DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers from %s...\n", len, buf);
3353 	}
3354 	else {
3355 
3356 		/* Dump address */
3357 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3358 
3359 		offset += ecore_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes);
3360 		if (dump && len > 64)
3361 			DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers from address 0x%x...\n", len, addr_in_bytes);
3362 	}
3363 
3364 	/* Dump len */
3365 	offset += ecore_dump_num_param(dump_buf + offset, dump, "len", len);
3366 
3367 	/* Dump bit width */
3368 	if (bit_width)
3369 		offset += ecore_dump_num_param(dump_buf + offset, dump, "width", bit_width);
3370 
3371 	/* Dump packed */
3372 	if (packed)
3373 		offset += ecore_dump_num_param(dump_buf + offset, dump, "packed", 1);
3374 
3375 	/* Dump reg type */
3376 	if (is_storm) {
3377 		OSAL_STRCPY(buf, "?STORM_");
3378 		buf[0] = storm_letter;
3379 		OSAL_STRCPY(buf + OSAL_STRLEN(buf), mem_group);
3380 	}
3381 	else {
3382 		OSAL_STRCPY(buf, mem_group);
3383 	}
3384 
3385 	offset += ecore_dump_str_param(dump_buf + offset, dump, "type", buf);
3386 
3387 	return offset;
3388 }
3389 
3390 /* Dumps a single GRC memory. If name is OSAL_NULL, the memory is stored by address.
3391  * Returns the dumped size in dwords.
3392  * The addr and len arguments are specified in dwords.
3393  */
3394 static u32 ecore_grc_dump_mem(struct ecore_hwfn *p_hwfn,
3395 			      struct ecore_ptt *p_ptt,
3396 			      u32 *dump_buf,
3397 			      bool dump,
3398 			      const char *name,
3399 			      u32 addr,
3400 			      u32 len,
3401 			      bool wide_bus,
3402 			      u32 bit_width,
3403 			      bool packed,
3404 			      const char *mem_group,
3405 			      bool is_storm,
3406 			      char storm_letter)
3407 {
3408 	u32 offset = 0;
3409 
3410 	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, addr, len, bit_width, packed, mem_group, is_storm, storm_letter);
3411 	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3412 
3413 	return offset;
3414 }
3415 
/* Dumps GRC memories entries. Returns the dumped size in dwords.
 * The input array is a sequence of condition headers, each immediately
 * followed by its memory entries. Returns 0 on a malformed input array.
 */
static u32 ecore_grc_dump_mem_entries(struct ecore_hwfn *p_hwfn,
									  struct ecore_ptt *p_ptt,
									  struct dbg_array input_mems_arr,
									  u32 *dump_buf,
									  bool dump)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	while (input_offset < input_mems_arr.size_in_dwords) {
		const struct dbg_dump_cond_hdr* cond_hdr;
		u16 modes_buf_offset;
		u32 num_entries;
		bool eval_mode;

		cond_hdr = (const struct dbg_dump_cond_hdr*)&input_mems_arr.ptr[input_offset++];
		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;

		/* Check required mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
		}

		/* Mode mismatch - skip all entries under this header */
		if (!mode_match) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
			const struct dbg_dump_mem *mem = (const struct dbg_dump_mem*)&input_mems_arr.ptr[input_offset];
			u8 mem_group_id = GET_FIELD(mem->dword0, DBG_DUMP_MEM_MEM_GROUP_ID);
			bool is_storm = false, mem_wide_bus;
			char storm_letter = 'a';
			u32 mem_addr, mem_len;

			if (mem_group_id >= MEM_GROUPS_NUM) {
				DP_NOTICE(p_hwfn, true, "Invalid mem_group_id\n");
				return 0;
			}

			/* Skip memories excluded by the current GRC parameters */
			if (!ecore_grc_is_mem_included(p_hwfn, (enum block_id)cond_hdr->block_id, mem_group_id))
				continue;

			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
			mem_wide_bus = GET_FIELD(mem->dword1, DBG_DUMP_MEM_WIDE_BUS);

			/* Update memory length for CCFC/TCFC memories
			 * according to number of LCIDs/LTIDs.
			 */
			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
				if (mem_len % MAX_LCIDS) {
					DP_NOTICE(p_hwfn, true, "Invalid CCFC connection memory size\n");
					return 0;
				}

				/* Scale from MAX_LCIDS down to the configured LCID count */
				mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS) * (mem_len / MAX_LCIDS);
			}
			else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
				if (mem_len % MAX_LTIDS) {
					DP_NOTICE(p_hwfn, true, "Invalid TCFC task memory size\n");
					return 0;
				}

				/* Scale from MAX_LTIDS down to the configured LTID count */
				mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS) * (mem_len / MAX_LTIDS);
			}

			/* If memory is associated with Storm, update Storm
			 * details.
			 */
			if (s_block_defs[cond_hdr->block_id]->associated_to_storm) {
				is_storm = true;
				storm_letter = s_storm_defs[s_block_defs[cond_hdr->block_id]->storm_id].letter;
			}

			/* Dump memory */
			offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, mem_addr, mem_len, mem_wide_bus,
											0, false, s_mem_group_names[mem_group_id], is_storm, storm_letter);
		}
	}

	return offset;
}
3502 
3503 /* Dumps GRC memories according to the input array dump_mem.
3504  * Returns the dumped size in dwords.
3505  */
3506 static u32 ecore_grc_dump_memories(struct ecore_hwfn *p_hwfn,
3507 								   struct ecore_ptt *p_ptt,
3508 								   u32 *dump_buf,
3509 								   bool dump)
3510 {
3511 	u32 offset = 0, input_offset = 0;
3512 
3513 	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3514 		const struct dbg_dump_split_hdr *split_hdr;
3515 		struct dbg_array curr_input_mems_arr;
3516 		u32 split_data_size;
3517 		u8 split_type_id;
3518 
3519 		split_hdr = (const struct dbg_dump_split_hdr*)&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3520 		split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3521 		split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3522 		curr_input_mems_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3523 		curr_input_mems_arr.size_in_dwords = split_data_size;
3524 
3525 		switch (split_type_id) {
3526 		case SPLIT_TYPE_NONE:
3527 			offset += ecore_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump);
3528 			break;
3529 
3530 		default:
3531 			DP_NOTICE(p_hwfn, true, "Dumping split memories is currently not supported\n");
3532 			break;
3533 		}
3534 
3535 		input_offset += split_data_size;
3536 	}
3537 
3538 	return offset;
3539 }
3540 
/* Dumps GRC context data for the specified Storm.
 * Returns the dumped size in dwords.
 * The lid_size argument is specified in quad-regs.
 */
static u32 ecore_grc_dump_ctx_data(struct ecore_hwfn *p_hwfn,
								   struct ecore_ptt *p_ptt,
								   u32 *dump_buf,
								   bool dump,
								   const char *name,
								   u32 num_lids,
								   u32 lid_size,
								   u32 rd_reg_addr,
								   u8 storm_id)
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 i, lid, total_size, offset = 0;

	/* Nothing to dump if this Storm has no context of this type */
	if (!lid_size)
		return 0;

	/* Convert lid_size from quad-regs to dwords (factor of 4) */
	lid_size *= BYTES_IN_DWORD;
	total_size = num_lids * lid_size;

	/* Header bit width is the per-lid size in bits (dwords * 32) */
	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, 0, total_size, lid_size * 32, false, name, true, storm->letter);

	/* In size-calculation mode, just account for the data size */
	if (!dump)
		return offset + total_size;

	/* Dump context data: for each lid, select each of its dwords via the
	 * Storm's CM context write-address register, then read the value back
	 * from rd_reg_addr.
	 * NOTE(review): the selector packs the dword index in bits [9..] and
	 * the lid in the low bits - confirm against the CM register spec.
	 */
	for (lid = 0; lid < num_lids; lid++) {
		for (i = 0; i < lid_size; i++, offset++) {
			ecore_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
			*(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, rd_reg_addr);
		}
	}

	return offset;
}
3579 
/* Dumps GRC contexts (Conn/Task, AG/ST) for all included Storms.
 * Returns the dumped size in dwords.
 */
static u32 ecore_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
							  struct ecore_ptt *p_ptt,
							  u32 *dump_buf,
							  bool dump)
{
	u32 offset = 0;
	u8 storm_id;

	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct storm_defs *storm = &s_storm_defs[storm_id];

		/* Skip Storms filtered out by the GRC parameters */
		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
			continue;

		/* Dump Conn AG context */
		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
			storm->cm_conn_ag_ctx_lid_size, storm->cm_conn_ag_ctx_rd_addr, storm_id);

		/* Dump Conn ST context */
		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
			storm->cm_conn_st_ctx_lid_size, storm->cm_conn_st_ctx_rd_addr, storm_id);

		/* Dump Task AG context */
		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
			storm->cm_task_ag_ctx_lid_size, storm->cm_task_ag_ctx_rd_addr, storm_id);

		/* Dump Task ST context */
		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
			storm->cm_task_st_ctx_lid_size, storm->cm_task_st_ctx_rd_addr, storm_id);
	}

	return offset;
}
3614 
3615 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3616 static u32 ecore_grc_dump_iors(struct ecore_hwfn *p_hwfn,
3617 							   struct ecore_ptt *p_ptt,
3618 							   u32 *dump_buf,
3619 							   bool dump)
3620 {
3621 	char buf[10] = "IOR_SET_?";
3622 	u32 addr, offset = 0;
3623 	u8 storm_id, set_id;
3624 
3625 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3626 		struct storm_defs *storm = &s_storm_defs[storm_id];
3627 
3628 		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3629 			continue;
3630 
3631 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3632 			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id);
3633 			buf[OSAL_STRLEN(buf) - 1] = '0' + set_id;
3634 			offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, buf, addr, IORS_PER_SET, false, 32, false, "ior", true, storm->letter);
3635 		}
3636 	}
3637 
3638 	return offset;
3639 }
3640 
/* Dump VFC CAM. Returns the dumped size in dwords.
 * Reads all CAM rows of the given Storm by issuing a CAM-read command per
 * row through the SEM-fast VFC interface.
 */
static u32 ecore_grc_dump_vfc_cam(struct ecore_hwfn *p_hwfn,
								  struct ecore_ptt *p_ptt,
								  u32 *dump_buf,
								  bool dump,
								  u8 storm_id)
{
	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
	/* NOTE(review): 'i' looks unused here, but is presumably the loop
	 * variable referenced inside the ARR_REG_WR/ARR_REG_RD macros -
	 * confirm before removing.
	 */
	u32 row, i, offset = 0;

	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, "vfc_cam", 0, total_size, 256, false, "vfc_cam", true, storm->letter);

	/* In size-calculation mode, just account for the data size */
	if (!dump)
		return offset + total_size;

	/* Prepare CAM address */
	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);

	for (row = 0; row < VFC_CAM_NUM_ROWS; row++, offset += VFC_CAM_RESP_DWORDS) {

		/* Write VFC CAM command */
		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, cam_cmd, VFC_CAM_CMD_DWORDS);

		/* Write VFC CAM address */
		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, cam_addr, VFC_CAM_ADDR_DWORDS);

		/* Read VFC CAM read response */
		ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_CAM_RESP_DWORDS);
	}

	return offset;
}
3677 
/* Dump VFC RAM. Returns the dumped size in dwords.
 * Reads rows [base_row, base_row + num_rows) of the given RAM type by
 * issuing a RAM-read command per row through the SEM-fast VFC interface.
 */
static u32 ecore_grc_dump_vfc_ram(struct ecore_hwfn *p_hwfn,
								  struct ecore_ptt *p_ptt,
								  u32 *dump_buf,
								  bool dump,
								  u8 storm_id,
								  struct vfc_ram_defs *ram_defs)
{
	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
	/* NOTE(review): 'i' looks unused here, but is presumably the loop
	 * variable referenced inside the ARR_REG_WR/ARR_REG_RD macros -
	 * confirm before removing.
	 */
	u32 row, i, offset = 0;

	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, ram_defs->mem_name, 0, total_size, 256, false, ram_defs->type_name, true, storm->letter);

	/* Prepare RAM address */
	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);

	/* In size-calculation mode, just account for the data size */
	if (!dump)
		return offset + total_size;

	for (row = ram_defs->base_row; row < ram_defs->base_row + ram_defs->num_rows; row++, offset += VFC_RAM_RESP_DWORDS) {

		/* Write VFC RAM command */
		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS);

		/* Write VFC RAM address */
		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, ram_addr, VFC_RAM_ADDR_DWORDS);

		/* Read VFC RAM read response */
		ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_RAM_RESP_DWORDS);
	}

	return offset;
}
3715 
3716 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3717 static u32 ecore_grc_dump_vfc(struct ecore_hwfn *p_hwfn,
3718 							  struct ecore_ptt *p_ptt,
3719 							  u32 *dump_buf,
3720 							  bool dump)
3721 {
3722 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3723 	u8 storm_id, i;
3724 	u32 offset = 0;
3725 
3726 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3727 		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id) ||
3728 			!s_storm_defs[storm_id].has_vfc ||
3729 			(storm_id == DBG_PSTORM_ID && dev_data->platform_id != PLATFORM_ASIC))
3730 			continue;
3731 
3732 		/* Read CAM */
3733 		offset += ecore_grc_dump_vfc_cam(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id);
3734 
3735 		/* Read RAM */
3736 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3737 			offset += ecore_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id, &s_vfc_ram_defs[i]);
3738 	}
3739 
3740 	return offset;
3741 }
3742 
/* Dumps GRC RSS data. Returns the dumped size in dwords.
 * For each RSS memory type, reads the RAM through the RSS_REG_RSS_RAM_ADDR /
 * RSS_REG_RSS_RAM_DATA window, one address-worth of dwords at a time.
 */
static u32 ecore_grc_dump_rss(struct ecore_hwfn *p_hwfn,
							  struct ecore_ptt *p_ptt,
							  u32 *dump_buf,
							  bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;
	u8 rss_mem_id;

	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
		u32 rss_addr, num_entries, total_dwords;
		struct rss_mem_defs *rss_defs;
		bool packed;

		rss_defs = &s_rss_mem_defs[rss_mem_id];
		rss_addr = rss_defs->addr;
		num_entries = rss_defs->num_entries[dev_data->chip_id];
		/* entry_width is in bits; 32 bits per dword */
		total_dwords = (num_entries * rss_defs->entry_width) / 32;
		/* 16-bit entries are packed two per dword */
		packed = (rss_defs->entry_width == 16);

		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, rss_defs->mem_name, 0, total_dwords,
			rss_defs->entry_width, packed, rss_defs->type_name, false, 0);

		/* Dump RSS data */
		if (!dump) {
			offset += total_dwords;
			continue;
		}

		/* Read the RAM window-by-window: select the row, then read up to
		 * RSS_REG_RSS_RAM_DATA_SIZE dwords of data.
		 */
		while (total_dwords) {
			u32 num_dwords_to_read = OSAL_MIN_T(u32, RSS_REG_RSS_RAM_DATA_SIZE, total_dwords);
			ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA), num_dwords_to_read, false);
			total_dwords -= num_dwords_to_read;
			rss_addr++;
		}
	}

	return offset;
}
3784 
/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_big_ram(struct ecore_hwfn *p_hwfn,
								  struct ecore_ptt *p_ptt,
								  u32 *dump_buf,
								  bool dump,
								  u8 big_ram_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 total_blocks, ram_size, offset = 0, i;
	char mem_name[12] = "???_BIG_RAM";
	char type_name[8] = "???_RAM";
	struct big_ram_defs *big_ram;

	big_ram = &s_big_ram_defs[big_ram_id];
	total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
	ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;

	/* Overwrite only the "???" prefix of the pre-terminated templates with
	 * the instance name; the copy length equals strlen(instance_name), so
	 * no NUL is copied and the remainder of the template (and its
	 * terminator) stays intact.
	 * NOTE(review): this relies on instance_name being exactly 3 chars -
	 * a longer name would clobber the rest of the template; confirm
	 * against s_big_ram_defs.
	 */
	OSAL_STRNCPY(type_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
	OSAL_STRNCPY(mem_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));

	/* Dump memory header */
	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, ram_size, BIG_RAM_BLOCK_SIZE_BYTES * 8, false, type_name, false, 0);

	/* Read and dump Big RAM data */
	if (!dump)
		return offset + ram_size;

	/* Dump Big RAM - each iteration selects an address and reads two
	 * blocks' worth of data, hence total_blocks / 2 iterations.
	 */
	for (i = 0; i < total_blocks / 2; i++) {
		ecore_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(big_ram->data_reg_addr), 2 * BIG_RAM_BLOCK_SIZE_DWORDS, false);
	}

	return offset;
}
3820 
/* Dumps MCP-related memories and registers (scratchpad, cpu_reg_file, MCP
 * block registers and the shared-memory address register). Halts the MCP
 * while dumping (on ASIC, unless NO_MCP is set) and resumes it afterwards.
 * Returns the dumped size in dwords.
 */
static u32 ecore_grc_dump_mcp(struct ecore_hwfn *p_hwfn,
							  struct ecore_ptt *p_ptt,
							  u32 *dump_buf,
							  bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	bool block_enable[MAX_BLOCK_ID] = { 0 };
	bool halted = false;
	u32 offset = 0;

	/* Halt MCP */
	if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
		halted = !ecore_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
	}

	/* Dump MCP scratchpad */
	offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH), MCP_REG_SCRATCH_SIZE, false, 0, false, "MCP", false, 0);

	/* Dump MCP cpu_reg_file */
	offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE), MCP_REG_CPU_REG_FILE_SIZE, false, 0, false, "MCP", false, 0);

	/* Dump MCP registers */
	block_enable[BLOCK_MCP] = true;
	offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, "block", "MCP");

	/* Dump required non-MCP registers */
	offset += ecore_grc_dump_regs_hdr(dump_buf + offset, dump, 1, "eng", -1, "block", "MCP");
	offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR), 1, false);

	/* Release MCP (only if it was successfully halted above) */
	if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");

	return offset;
}
3858 
/* Dumps the tbus indirect memory for all PHYs.
 * For each PHY, sweeps all tbus addresses (hi/lo selector registers) and
 * reads one byte from each of the data-lo and data-hi registers per address.
 * Returns the dumped size in dwords.
 */
static u32 ecore_grc_dump_phy(struct ecore_hwfn *p_hwfn,
							  struct ecore_ptt *p_ptt,
							  u32 *dump_buf,
							  bool dump)
{
	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
	char mem_name[32];
	u8 phy_id;

	for (phy_id = 0; phy_id < OSAL_ARRAY_SIZE(s_phy_defs); phy_id++) {
		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
		struct phy_defs *phy_defs;
		u8 *bytes_buf;

		phy_defs = &s_phy_defs[phy_id];
		addr_lo_addr = phy_defs->base_addr + phy_defs->tbus_addr_lo_addr;
		addr_hi_addr = phy_defs->base_addr + phy_defs->tbus_addr_hi_addr;
		data_lo_addr = phy_defs->base_addr + phy_defs->tbus_data_lo_addr;
		data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr;
		/* Data is written byte-wise into the dword buffer */
		bytes_buf = (u8*)(dump_buf + offset);

		if (OSAL_SNPRINTF(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0)
			DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid PHY memory name\n");

		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, false, 0);

		/* In size-calculation mode, just account for the data size */
		if (!dump) {
			offset += PHY_DUMP_SIZE_DWORDS;
			continue;
		}

		/* Sweep the address space: hi selector covers bits [8..],
		 * lo selector covers the low 8 bits.
		 */
		for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) {
			ecore_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
			for (tbus_lo_offset = 0; tbus_lo_offset < 256; tbus_lo_offset++) {
				ecore_wr(p_hwfn, p_ptt, addr_lo_addr, tbus_lo_offset);
				*(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_lo_addr);
				*(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_hi_addr);
			}
		}

		offset += PHY_DUMP_SIZE_DWORDS;
	}

	return offset;
}
3905 
3906 static void ecore_config_dbg_line(struct ecore_hwfn *p_hwfn,
3907 								  struct ecore_ptt *p_ptt,
3908 								  enum block_id block_id,
3909 								  u8 line_id,
3910 								  u8 enable_mask,
3911 								  u8 right_shift,
3912 								  u8 force_valid_mask,
3913 								  u8 force_frame_mask)
3914 {
3915 	struct block_defs *block = s_block_defs[block_id];
3916 
3917 	ecore_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3918 	ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3919 	ecore_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3920 	ecore_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3921 	ecore_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3922 }
3923 
/* Dumps Static Debug data. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_static_debug(struct ecore_hwfn *p_hwfn,
									   struct ecore_ptt *p_ptt,
									   u32 *dump_buf,
									   bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, line_id, offset = 0;

	/* Skip static debug if a debug bus recording is in progress */
	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
		return 0;

	if (dump) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping static debug data...\n");

		/* Disable all blocks debug output */
		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
			struct block_defs *block = s_block_defs[block_id];

			/* Only blocks with a debug bus client can be disabled */
			if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS)
				ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
		}

		/* Configure the debug block for static-line capture */
		ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
		ecore_bus_set_framing_mode(p_hwfn, p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
		ecore_bus_enable_dbg_block(p_hwfn, p_ptt, true);
	}

	/* Dump all static debug lines for each relevant block */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		struct block_defs *block = s_block_defs[block_id];
		struct dbg_bus_block *block_desc;
		u32 block_dwords;

		/* Skip blocks without a debug bus client */
		if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS)
			continue;

		block_desc = get_dbg_bus_block_desc(p_hwfn, (enum block_id)block_id);
		block_dwords = NUM_DBG_LINES(block_desc) * STATIC_DEBUG_LINE_DWORDS;

		/* Dump static section params */
		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, block->name, 0, block_dwords, 32, false, "STATIC", false, 0);

		/* In size-calculation mode, just account for the data size */
		if (!dump) {
			offset += block_dwords;
			continue;
		}

		/* If all lines are invalid - dump zeros */
		if (dev_data->block_in_reset[block_id]) {
			OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(block_dwords));
			offset += block_dwords;
			continue;
		}

		/* Enable block's client */
		ecore_bus_enable_clients(p_hwfn, p_ptt, 1 << block->dbg_client_id[dev_data->chip_id]);
		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); line_id++) {

			/* Configure debug line ID */
			ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, (u8)line_id, 0xf, 0, 0, 0);

			/* Read debug line info */
			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA), STATIC_DEBUG_LINE_DWORDS, true);
		}

		/* Disable block's client and debug output */
		ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
		ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
	}

	/* Restore the debug block state after dumping */
	if (dump) {
		ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
		ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
	}

	return offset;
}
4005 
/* Performs GRC Dump to the specified buffer.
 * Returns the dumped size in dwords.
 * When dump is false, only the required buffer size is computed (no HW
 * access, no side effects).
 */
static enum dbg_status ecore_grc_dump(struct ecore_hwfn *p_hwfn,
									  struct ecore_ptt *p_ptt,
									  u32 *dump_buf,
									  bool dump,
									  u32 *num_dumped_dwords)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	bool is_asic, parities_masked = false;
	u8 i, port_mode = 0;
	u32 offset = 0;

	is_asic = dev_data->platform_id == PLATFORM_ASIC;

	*num_dumped_dwords = 0;

	if (dump) {

		/* Find port mode (stays 0 for unrecognized register values) */
		switch (ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
		case 0: port_mode = 1; break;
		case 1: port_mode = 2; break;
		case 2: port_mode = 4; break;
		}

		/* Update reset state */
		ecore_update_blocks_reset_state(p_hwfn, p_ptt);
	}

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 4);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "grc-dump");
	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-lcids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS));
	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ltids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS));
	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ports", port_mode);

	/* Dump reset registers (dumped before taking blocks out of reset ) */
	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
		offset += ecore_grc_dump_reset_regs(p_hwfn, p_ptt, dump_buf + offset, dump);

	/* Take all blocks out of reset (using reset registers) */
	if (dump) {
		ecore_grc_unreset_blocks(p_hwfn, p_ptt);
		ecore_update_blocks_reset_state(p_hwfn, p_ptt);
	}

	/* Disable all parities using MFW command */
	if (dump && is_asic && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
		parities_masked = !ecore_mcp_mask_parities(p_hwfn, p_ptt, 1);
		if (!parities_masked) {
			DP_NOTICE(p_hwfn, false, "Failed to mask parities using MFW\n");
			/* In parity-safe mode, refuse to dump with parities live */
			if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
		}
	}

	/* Dump modified registers (dumped before modifying them) */
	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
		offset += ecore_grc_dump_modified_regs(p_hwfn, p_ptt, dump_buf + offset, dump);

	/* Stall storms */
	if (dump && (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
		ecore_grc_stall_storms(p_hwfn, p_ptt, true);

	/* Dump all regs  */
	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
		bool block_enable[MAX_BLOCK_ID];

		/* Dump all blocks except MCP */
		for (i = 0; i < MAX_BLOCK_ID; i++)
			block_enable[i] = true;
		block_enable[BLOCK_MCP] = false;
		offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, OSAL_NULL, OSAL_NULL);

		/* Dump special registers */
		offset += ecore_grc_dump_special_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
	}

	/* Dump memories */
	offset += ecore_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);

	/* Dump MCP */
	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
		offset += ecore_grc_dump_mcp(p_hwfn, p_ptt, dump_buf + offset, dump);

	/* Dump context */
	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
		offset += ecore_grc_dump_ctx(p_hwfn, p_ptt, dump_buf + offset, dump);

	/* Dump RSS memories */
	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
		offset += ecore_grc_dump_rss(p_hwfn, p_ptt, dump_buf + offset, dump);

	/* Dump Big RAM */
	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
		if (ecore_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
			offset += ecore_grc_dump_big_ram(p_hwfn, p_ptt, dump_buf + offset, dump, i);

	/* Dump IORs */
	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
		offset += ecore_grc_dump_iors(p_hwfn, p_ptt, dump_buf + offset, dump);

	/* Dump VFC */
	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
		offset += ecore_grc_dump_vfc(p_hwfn, p_ptt, dump_buf + offset, dump);

	/* Dump PHY tbus (K2 ASIC only) */
	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
		offset += ecore_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump);

	/* Dump static debug data (only when the debug bus is idle) */
	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && dev_data->bus.state == DBG_BUS_STATE_IDLE)
		offset += ecore_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump);

	/* Dump last section */
	offset += ecore_dump_last_section(dump_buf, offset, dump);

	if (dump) {

		/* Unstall storms */
		if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
			ecore_grc_stall_storms(p_hwfn, p_ptt, false);

		/* Clear parity status */
		if (is_asic)
			ecore_grc_clear_all_prty(p_hwfn, p_ptt);

		/* Enable all parities using MFW command */
		if (parities_masked)
			ecore_mcp_mask_parities(p_hwfn, p_ptt, 0);
	}

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
4144 
/* Writes the specified failing Idle Check rule to the specified buffer.
 * Returns the dumped size in dwords (0 on a malformed rule).
 * Layout: result header, then a (reg header + values) record per condition
 * register, then a (reg header + values) record per dumped info register.
 */
static u32 ecore_idle_chk_dump_failure(struct ecore_hwfn *p_hwfn,
									   struct ecore_ptt *p_ptt,
									   u32 *dump_buf,
									   bool dump,
									   u16 rule_id,
									   const struct dbg_idle_chk_rule *rule,
									   u16 fail_entry_id,
									   u32 *cond_reg_values)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_idle_chk_cond_reg *cond_regs;
	const struct dbg_idle_chk_info_reg *info_regs;
	u32 i, next_reg_offset = 0, offset = 0;
	struct dbg_idle_chk_result_hdr *hdr;
	const union dbg_idle_chk_reg *regs;
	u8 reg_id;

	hdr = (struct dbg_idle_chk_result_hdr*)dump_buf;
	/* The rule's registers: condition regs first, then info regs */
	regs = &((const union dbg_idle_chk_reg*)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
	cond_regs = &regs[0].cond_reg;
	info_regs = &regs[rule->num_cond_regs].info_reg;

	/* Dump rule data */
	if (dump) {
		OSAL_MEMSET(hdr, 0, sizeof(*hdr));
		hdr->rule_id = rule_id;
		hdr->mem_entry_id = fail_entry_id;
		hdr->severity = rule->severity;
		hdr->num_dumped_cond_regs = rule->num_cond_regs;
	}

	offset += IDLE_CHK_RESULT_HDR_DWORDS;

	/* Dump condition register values (already read into cond_reg_values) */
	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
		struct dbg_idle_chk_result_reg_hdr *reg_hdr;

		reg_hdr = (struct dbg_idle_chk_result_reg_hdr*)(dump_buf + offset);

		/* Write register header */
		if (!dump) {
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size;
			continue;
		}

		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
		OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
		reg_hdr->start_entry = reg->start_entry;
		reg_hdr->size = reg->entry_size;
		/* Mark as a memory when it has multiple or offset entries */
		SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
		SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);

		/* Write register values */
		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
			dump_buf[offset] = cond_reg_values[next_reg_offset];
	}

	/* Dump info register values (read from HW here) */
	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
		u32 block_id;

		/* Check if register's block is in reset */
		if (!dump) {
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
			continue;
		}

		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
		if (block_id >= MAX_BLOCK_ID) {
			DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
			return 0;
		}

		/* Registers of blocks in reset cannot be read - skip them */
		if (!dev_data->block_in_reset[block_id]) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool wide_bus, eval_mode, mode_match = true;
			u16 modes_buf_offset;
			u32 addr;

			reg_hdr = (struct dbg_idle_chk_result_reg_hdr*)(dump_buf + offset);

			/* Check mode */
			eval_mode = GET_FIELD(reg->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
			if (eval_mode) {
				modes_buf_offset = GET_FIELD(reg->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
				mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
			}

			if (!mode_match)
				continue;

			addr = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_ADDRESS);
			wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_WIDE_BUS);

			/* Write register header; info regs are counted in the
			 * result header as they are dumped.
			 */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
			hdr->num_dumped_info_regs++;
			OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
			reg_hdr->size = reg->size;
			/* Info reg IDs follow the condition reg IDs */
			SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, rule->num_cond_regs + reg_id);

			/* Write register values */
			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, reg->size, wide_bus);
		}
	}

	return offset;
}
4258 
/* Dumps idle check rule entries. Returns the dumped size in dwords.
 * When dump=false, no HW is accessed and the returned size is a worst-case
 * estimate that assumes every entry of every rule fails.
 */
static u32 ecore_idle_chk_dump_rule_entries(struct ecore_hwfn *p_hwfn,
											struct ecore_ptt *p_ptt,
											u32 *dump_buf,
											bool dump,
											const struct dbg_idle_chk_rule *input_rules,
											u32 num_input_rules,
											u32 *num_failing_rules)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
	u32 i, offset = 0;
	u16 entry_id;
	u8 reg_id;

	*num_failing_rules = 0;

	for (i = 0; i < num_input_rules; i++) {
		const struct dbg_idle_chk_cond_reg *cond_regs;
		const struct dbg_idle_chk_rule *rule;
		const union dbg_idle_chk_reg *regs;
		u16 num_reg_entries = 1;
		bool check_rule = true;
		const u32 *imm_values;

		rule = &input_rules[i];

		/* The rule's condition registers and immediate values live in the
		 * binary debug arrays, at offsets stored in the rule itself.
		 */
		regs = &((const union dbg_idle_chk_reg*)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
		cond_regs = &regs[0].cond_reg;
		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr[rule->imm_offset];

		/* Check if all condition register blocks are out of reset, and
		 * find maximal number of entries (all condition registers that
		 * are memories must have the same size, which is > 1).
		 */
		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) {
			u32 block_id = GET_FIELD(cond_regs[reg_id].data, DBG_IDLE_CHK_COND_REG_BLOCK_ID);

			if (block_id >= MAX_BLOCK_ID) {
				DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
				return 0;
			}

			check_rule = !dev_data->block_in_reset[block_id];
			if (cond_regs[reg_id].num_entries > num_reg_entries)
				num_reg_entries = cond_regs[reg_id].num_entries;
		}

		/* A rule whose block is in reset can't be evaluated */
		if (!check_rule && dump)
			continue;

		if (!dump) {
			/* Size estimation: assume all entries of this rule fail */
			u32 entry_dump_size = ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, false, rule->rule_id, rule, 0, OSAL_NULL);

			offset += num_reg_entries * entry_dump_size;
			(*num_failing_rules) += num_reg_entries;
			continue;
		}

		/* Go over all register entries (number of entries is the same for all
		 * condition registers).
		 */
		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
			u32 next_reg_offset = 0;

			/* Read current entry of all condition registers */
			for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
				const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
				u32 padded_entry_size, addr;
				bool wide_bus;

				/* Find GRC address (if it's a memory, the address of the
				 * specific entry is calculated).
				 */
				addr = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_ADDRESS);
				wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_WIDE_BUS);
				if (reg->num_entries > 1 || reg->start_entry > 0) {
					/* Memory entries are padded to a power-of-two size */
					padded_entry_size = reg->entry_size > 1 ? OSAL_ROUNDUP_POW_OF_TWO(reg->entry_size) : 1;
					addr += (reg->start_entry + entry_id) * padded_entry_size;
				}

				/* Read registers, guarding against overrunning the
				 * on-stack staging buffer.
				 */
				if (next_reg_offset + reg->entry_size >= IDLE_CHK_MAX_ENTRIES_SIZE) {
					DP_NOTICE(p_hwfn, true, "idle check registers entry is too large\n");
					return 0;
				}

				next_reg_offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, reg->entry_size, wide_bus);
			}

			/* Call rule condition function. if returns true, it's a failure.*/
			if ((*cond_arr[rule->cond_id])(cond_reg_values, imm_values)) {
				offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, dump, rule->rule_id, rule, entry_id, cond_reg_values);
				(*num_failing_rules)++;
			}
		}
	}

	return offset;
}
4358 
/* Performs Idle Check Dump to the specified buffer.
 * Returns the dumped size in dwords.
 * When dump=false, only computes the required buffer size.
 */
static u32 ecore_idle_chk_dump(struct ecore_hwfn *p_hwfn,
							   struct ecore_ptt *p_ptt,
							   u32 *dump_buf,
							   bool dump)
{
	u32 num_failing_rules_offset, offset = 0, input_offset = 0, num_failing_rules = 0;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "idle-chk");

	/* Dump idle check section header with a single parameter */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
	num_failing_rules_offset = offset;
	offset += ecore_dump_num_param(dump_buf + offset, dump, "num_rules", 0);

	/* The rules binary array is a sequence of condition headers, each
	 * followed by data_size dwords of rule entries.
	 */
	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
		const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr*)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset++];
		bool eval_mode, mode_match = true;
		u32 curr_failing_rules;
		u16 modes_buf_offset;

		/* Check mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
		}

		if (mode_match) {
			offset += ecore_idle_chk_dump_rule_entries(p_hwfn, p_ptt, dump_buf + offset, dump, (const struct dbg_idle_chk_rule*)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset], cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS, &curr_failing_rules);
			num_failing_rules += curr_failing_rules;
		}

		input_offset += cond_hdr->data_size;
	}

	/* Overwrite num_rules parameter, now that the real count is known */
	if (dump)
		ecore_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules);

	/* Dump last section */
	offset += ecore_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4408 
/* Finds the meta data image in NVRAM.
 * - nvram_offset_bytes (OUT): image offset in NVRAM, in bytes.
 * - nvram_size_bytes (OUT):   image size in bytes (verified dword-aligned).
 */
static enum dbg_status ecore_find_nvram_image(struct ecore_hwfn *p_hwfn,
											  struct ecore_ptt *p_ptt,
											  u32 image_type,
											  u32 *nvram_offset_bytes,
											  u32 *nvram_size_bytes)
{
	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
	struct mcp_file_att file_att;
	int nvm_result;

	/* Call NVRAM get file command; the MCP fills file_att */
	nvm_result = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, image_type, &ret_mcp_resp, &ret_mcp_param, &ret_txn_size, (u32*)&file_att);

	/* Check response */
	if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;

	/* Update return values.
	 * NOTE(review): ret_txn_size is not validated against sizeof(file_att)
	 * before the copy above - presumably the MFW never returns more than a
	 * file attribute struct for this command; confirm against the MFW
	 * mailbox interface.
	 */
	*nvram_offset_bytes = file_att.nvm_start_addr;
	*nvram_size_bytes = file_att.len;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", image_type, *nvram_offset_bytes, *nvram_size_bytes);

	/* Check alignment - callers convert the size to dwords */
	if (*nvram_size_bytes & 0x3)
		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;

	return DBG_STATUS_OK;
}
4439 
4440 /* Reads data from NVRAM */
4441 static enum dbg_status ecore_nvram_read(struct ecore_hwfn *p_hwfn,
4442 										struct ecore_ptt *p_ptt,
4443 										u32 nvram_offset_bytes,
4444 										u32 nvram_size_bytes,
4445 										u32 *ret_buf)
4446 {
4447 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4448 	s32 bytes_left = nvram_size_bytes;
4449 	u32 read_offset = 0;
4450 
4451 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes);
4452 
4453 	do {
4454 		bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4455 
4456 		/* Call NVRAM read command */
4457 		if (ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, (nvram_offset_bytes + read_offset) | (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_OFFSET), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32*)((u8*)ret_buf + read_offset)))
4458 			return DBG_STATUS_NVRAM_READ_FAILED;
4459 
4460 		/* Check response */
4461 		if ((ret_mcp_resp  & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4462 			return DBG_STATUS_NVRAM_READ_FAILED;
4463 
4464 		/* Update read offset */
4465 		read_offset += ret_read_size;
4466 		bytes_left -= ret_read_size;
4467 	} while (bytes_left > 0);
4468 
4469 	return DBG_STATUS_OK;
4470 }
4471 
4472 /* Get info on the MCP Trace data in the scratchpad:
4473  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4474  * - trace_data_size (OUT): trace data size in bytes (without the header)
4475  */
4476 static enum dbg_status ecore_mcp_trace_get_data_info(struct ecore_hwfn *p_hwfn,
4477 													 struct ecore_ptt *p_ptt,
4478 													 u32 *trace_data_grc_addr,
4479 													 u32 *trace_data_size)
4480 {
4481 	u32 spad_trace_offsize, signature;
4482 
4483 	/* Read trace section offsize structure from MCP scratchpad */
4484 	spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4485 
4486 	/* Extract trace section address from offsize (in scratchpad) */
4487 	*trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4488 
4489 	/* Read signature from MCP trace section */
4490 	signature = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, signature));
4491 
4492 	if (signature != MFW_TRACE_SIGNATURE)
4493 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4494 
4495 	/* Read trace size from MCP trace section */
4496 	*trace_data_size = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, size));
4497 
4498 	return DBG_STATUS_OK;
4499 }
4500 
4501 /* Reads MCP trace meta data image from NVRAM
4502  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4503  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4504  *			      loaded from file).
4505  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4506  */
4507 static enum dbg_status ecore_mcp_trace_get_meta_info(struct ecore_hwfn *p_hwfn,
4508 													 struct ecore_ptt *p_ptt,
4509 													 u32 trace_data_size_bytes,
4510 													 u32 *running_bundle_id,
4511 													 u32 *trace_meta_offset,
4512 													 u32 *trace_meta_size)
4513 {
4514 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4515 
4516 	/* Read MCP trace section offsize structure from MCP scratchpad */
4517 	spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4518 
4519 	/* Find running bundle ID */
4520 	running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4521 	*running_bundle_id = ecore_rd(p_hwfn, p_ptt, running_mfw_addr);
4522 	if (*running_bundle_id > 1)
4523 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4524 
4525 	/* Find image in NVRAM */
4526 	nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4527 	return ecore_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, trace_meta_offset, trace_meta_size);
4528 }
4529 
4530 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4531 static enum dbg_status ecore_mcp_trace_read_meta(struct ecore_hwfn *p_hwfn,
4532 												 struct ecore_ptt *p_ptt,
4533 												 u32 nvram_offset_in_bytes,
4534 												 u32 size_in_bytes,
4535 												 u32 *buf)
4536 {
4537 	u8 modules_num, module_len, i, *byte_buf = (u8*)buf;
4538 	enum dbg_status status;
4539 	u32 signature;
4540 
4541 	/* Read meta data from NVRAM */
4542 	status = ecore_nvram_read(p_hwfn, p_ptt, nvram_offset_in_bytes, size_in_bytes, buf);
4543 	if (status != DBG_STATUS_OK)
4544 		return status;
4545 
4546 	/* Extract and check first signature */
4547 	signature = ecore_read_unaligned_dword(byte_buf);
4548 	byte_buf += sizeof(signature);
4549 	if (signature != NVM_MAGIC_VALUE)
4550 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4551 
4552 	/* Extract number of modules */
4553 	modules_num = *(byte_buf++);
4554 
4555 	/* Skip all modules */
4556 	for (i = 0; i < modules_num; i++) {
4557 		module_len = *(byte_buf++);
4558 		byte_buf += module_len;
4559 	}
4560 
4561 	/* Extract and check second signature */
4562 	signature = ecore_read_unaligned_dword(byte_buf);
4563 	byte_buf += sizeof(signature);
4564 	if (signature != NVM_MAGIC_VALUE)
4565 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4566 
4567 	return DBG_STATUS_OK;
4568 }
4569 
/* Dump MCP Trace.
 * Dumps the MCP trace data (from the scratchpad) and, when NVRAM access is
 * possible, the trace meta data image. When dump=false, only computes the
 * required buffer size. The dumped size (dwords) is returned via
 * num_dumped_dwords.
 */
static enum dbg_status ecore_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
											struct ecore_ptt *p_ptt,
											u32 *dump_buf,
											bool dump,
											u32 *num_dumped_dwords)
{
	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0, trace_meta_size_dwords = 0;
	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 running_bundle_id, offset = 0;
	enum dbg_status status;
	bool mcp_access;
	int halted = 0;

	*num_dumped_dwords = 0;

	/* MCP can only be touched on real silicon and when not blocked by GRC
	 * params.
	 */
	mcp_access = dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);

	/* Get trace data info */
	status = ecore_mcp_trace_get_data_info(p_hwfn, p_ptt, &trace_data_grc_addr, &trace_data_size_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "mcp-trace");

	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. if halt fails, MCP trace is taken anyway, with a small
	 * risk that it may be corrupt.
	 */
	if (dump && mcp_access) {
		halted = !ecore_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
	}

	/* Find trace data size, including the mcp_trace header */
	trace_data_size_dwords = DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), BYTES_IN_DWORD);

	/* Dump trace data section header and param */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_data", 1);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_data_size_dwords);

	/* Read trace data from scratchpad into dump buffer */
	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), trace_data_size_dwords, false);

	/* Resume MCP (only if halt succeeded) */
	if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");

	/* Dump trace meta section header */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1);

	/* Read trace meta only if NVRAM access is enabled
	 * (trace_meta_size_bytes is dword-aligned).
	 */
	if (OSAL_NVM_IS_ACCESS_ENABLED(p_hwfn) && mcp_access) {
		status = ecore_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, &running_bundle_id, &trace_meta_offset_bytes, &trace_meta_size_bytes);
		if (status == DBG_STATUS_OK)
			trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
	}

	/* Dump trace meta size param */
	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_meta_size_dwords);

	/* Read trace meta image into dump buffer.
	 * NOTE(review): when the read below is skipped, 'status' still holds
	 * the result of the last call above - the offset advance then depends
	 * on that earlier status; looks intentional, but confirm.
	 */
	if (dump && trace_meta_size_dwords)
		status = ecore_mcp_trace_read_meta(p_hwfn, p_ptt, trace_meta_offset_bytes, trace_meta_size_bytes, dump_buf + offset);
	if (status == DBG_STATUS_OK)
		offset += trace_meta_size_dwords;

	/* Dump last section */
	offset += ecore_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	/* If no mcp access, indicate that the dump doesn't contain the meta
	 * data from NVRAM.
	 */
	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
4653 
/* Dump GRC FIFO.
 * Drains the GRC trace FIFO into the dump buffer. When dump=false, only
 * computes the worst-case buffer size. The dumped size (dwords) is returned
 * via num_dumped_dwords.
 */
static enum dbg_status ecore_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
										   struct ecore_ptt *p_ptt,
										   u32 *dump_buf,
										   bool dump,
										   u32 *num_dumped_dwords)
{
	u32 dwords_read, size_param_offset, offset = 0;
	bool fifo_has_data;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo");

	/* Dump fifo data section header and param. The size param is 0 for
	 * now, and is overwritten after reading the FIFO.
	 */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1);
	size_param_offset = offset;
	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (dump) {
		fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;

		/* Pull available data from fifo. Use DMAE since this is
		 * widebus memory and must be accessed atomically. Test for
		 * dwords_read not passing buffer size since more entries could
		 * be added to the buffer as we
		 * are emptying it.
		 */
		for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; dwords_read += REG_FIFO_ELEMENT_DWORDS, offset += REG_FIFO_ELEMENT_DWORDS) {
			if (ecore_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO, (u64)(osal_uintptr_t)(&dump_buf[offset]), REG_FIFO_ELEMENT_DWORDS, 0))
				return DBG_STATUS_DMAE_FAILED;
			fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
		}

		/* Patch the size param with the number of dwords drained */
		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
	}
	else {

		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
		 * test how much data is available, except for reading it.
		 */
		offset += REG_FIFO_DEPTH_DWORDS;
	}

	/* Dump last section */
	offset += ecore_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
4709 
4710 /* Dump IGU FIFO */
4711 static enum dbg_status ecore_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
4712 										   struct ecore_ptt *p_ptt,
4713 										   u32 *dump_buf,
4714 										   bool dump,
4715 										   u32 *num_dumped_dwords)
4716 {
4717 	u32 dwords_read, size_param_offset, offset = 0;
4718 	bool fifo_has_data;
4719 
4720 	*num_dumped_dwords = 0;
4721 
4722 	/* Dump global params */
4723 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4724 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo");
4725 
4726 	/* Dump fifo data section header and param. The size param is 0 for
4727 	 * now, and is overwritten after reading the FIFO.
4728 	 */
4729 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1);
4730 	size_param_offset = offset;
4731 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4732 
4733 	if (dump) {
4734 		fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4735 
4736 		/* Pull available data from fifo. Use DMAE since this is
4737 		 * widebus memory and must be accessed atomically. Test for
4738 		 * dwords_read not passing buffer size since more entries could
4739 		 * be added to the buffer as we are emptying it.
4740 		 */
4741 		for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset += IGU_FIFO_ELEMENT_DWORDS) {
4742 			if (ecore_dmae_grc2host(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_MEMORY, (u64)(osal_uintptr_t)(&dump_buf[offset]), IGU_FIFO_ELEMENT_DWORDS, 0))
4743 				return DBG_STATUS_DMAE_FAILED;
4744 			fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4745 		}
4746 
4747 		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4748 	}
4749 	else {
4750 
4751 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4752 		 * test how much data is available, except for reading it.
4753 		 */
4754 		offset += IGU_FIFO_DEPTH_DWORDS;
4755 	}
4756 
4757 	/* Dump last section */
4758 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4759 
4760 	*num_dumped_dwords = offset;
4761 
4762 	return DBG_STATUS_OK;
4763 }
4764 
4765 /* Protection Override dump */
4766 static enum dbg_status ecore_protection_override_dump(struct ecore_hwfn *p_hwfn,
4767 													  struct ecore_ptt *p_ptt,
4768 													  u32 *dump_buf,
4769 													  bool dump,
4770 													  u32 *num_dumped_dwords)
4771 {
4772 	u32 size_param_offset, override_window_dwords, offset = 0;
4773 
4774 	*num_dumped_dwords = 0;
4775 
4776 	/* Dump global params */
4777 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4778 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override");
4779 
4780 	/* Dump data section header and param. The size param is 0 for now,
4781 	 * and is overwritten after reading the data.
4782 	 */
4783 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1);
4784 	size_param_offset = offset;
4785 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4786 
4787 	if (dump) {
4788 		/* Add override window info to buffer */
4789 		override_window_dwords = ecore_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4790 		if (ecore_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_PROTECTION_OVERRIDE_WINDOW, (u64)(osal_uintptr_t)(dump_buf + offset), override_window_dwords, 0))
4791 			return DBG_STATUS_DMAE_FAILED;
4792 		offset += override_window_dwords;
4793 		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords);
4794 	}
4795 	else {
4796 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4797 	}
4798 
4799 	/* Dump last section */
4800 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4801 
4802 	*num_dumped_dwords = offset;
4803 
4804 	return DBG_STATUS_OK;
4805 }
4806 
/* Performs FW Asserts Dump to the specified buffer.
 * Returns the dumped size in dwords.
 * When dump=false, only computes the required buffer size.
 */
static u32 ecore_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
								 struct ecore_ptt *p_ptt,
								 u32 *dump_buf,
								 bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct fw_asserts_ram_section *asserts;
	char storm_letter_str[2] = "?";
	struct fw_info fw_info;
	u32 offset = 0;
	u8 storm_id;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts");

	/* Dump one fw_asserts section per Storm that is out of reset */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, last_list_idx, addr;
		struct storm_defs *storm = &s_storm_defs[storm_id];

		if (dev_data->block_in_reset[storm->block_id])
			continue;

		/* Read FW info for the current Storm  */
		ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

		asserts = &fw_info.fw_asserts_section;

		/* Dump FW Asserts section header and params */
		storm_letter_str[0] = storm->letter;
		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "fw_asserts", 2);
		offset += ecore_dump_str_param(dump_buf + offset, dump, "storm", storm_letter_str);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "size", asserts->list_element_dword_size);

		/* Read and dump FW Asserts data */
		if (!dump) {
			offset += asserts->list_element_dword_size;
			continue;
		}

		/* The asserts list lives in the Storm's fast-memory RAM; locate
		 * the most recently written element.
		 */
		fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
		next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
		next_list_idx = ecore_rd(p_hwfn, p_ptt, next_list_idx_addr);
		/* next_list_idx == 0 means the list wrapped; the last element is
		 * then at the end of the list.
		 */
		last_list_idx = (next_list_idx > 0 ? next_list_idx : asserts->list_num_elements) - 1;
		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset +
					last_list_idx * asserts->list_element_dword_size;
		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, asserts->list_element_dword_size, false);
	}

	/* Dump last section */
	offset += ecore_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4866 
4867 /***************************** Public Functions *******************************/
4868 
4869 enum dbg_status ecore_dbg_set_bin_ptr(const u8 * const bin_ptr)
4870 {
4871 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr*)bin_ptr;
4872 	u8 buf_id;
4873 
4874 	/* convert binary data to debug arrays */
4875 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4876 		s_dbg_arrays[buf_id].ptr = (u32*)(bin_ptr + buf_array[buf_id].offset);
4877 		s_dbg_arrays[buf_id].size_in_dwords = BYTES_TO_DWORDS(buf_array[buf_id].length);
4878 	}
4879 
4880 	return DBG_STATUS_OK;
4881 }
4882 
4883 enum dbg_status ecore_dbg_set_app_ver(u32 ver)
4884 {
4885 	if (ver < TOOLS_VERSION)
4886 		return DBG_STATUS_UNSUPPORTED_APP_VERSION;
4887 
4888 	s_app_ver = ver;
4889 
4890 	return DBG_STATUS_OK;
4891 }
4892 
/* Returns the version of the debug FW-functions module itself */
u32 ecore_dbg_get_fw_func_ver(void)
{
	return TOOLS_VERSION;
}
4897 
4898 enum chip_ids ecore_dbg_get_chip_id(struct ecore_hwfn *p_hwfn)
4899 {
4900 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4901 
4902 	return (enum chip_ids)dev_data->chip_id;
4903 }
4904 
/* Resets the debug bus and prepares it for recording.
 * - one_shot_en:    stop when the target buffer fills (vs. wrap-around).
 * - force_hw_dwords: force HW dword width; must be 0, 4 or 8.
 * - unify_inputs:   record all inputs through a single unified path.
 * - grc_input_en:   enable the DBG block's own GRC input.
 */
enum dbg_status ecore_dbg_bus_reset(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt,
									bool one_shot_en,
									u8 force_hw_dwords,
									bool unify_inputs,
									bool grc_input_en)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum dbg_status status;

	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
	if (status != DBG_STATUS_OK)
		return status;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_reset: one_shot_en = %d, force_hw_dwords = %d, unify_inputs = %d, grc_input_en = %d\n", one_shot_en, force_hw_dwords, unify_inputs, grc_input_en);

	/* Only 0 (auto), 4 or 8 HW dwords are supported */
	if (force_hw_dwords &&
		force_hw_dwords != 4 &&
		force_hw_dwords != 8)
		return DBG_STATUS_INVALID_ARGS;

	/* Refuse to reset while a recording is in progress */
	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
		return DBG_STATUS_DBG_BUS_IN_USE;

	/* Update reset state of all blocks */
	ecore_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Disable all debug inputs */
	status = ecore_bus_disable_inputs(p_hwfn, p_ptt, false);
	if (status != DBG_STATUS_OK)
		return status;

	/* Reset DBG block */
	ecore_bus_reset_dbg_block(p_hwfn, p_ptt);

	/* Set one-shot / wrap-around */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, one_shot_en ? 0 : 1);

	/* Init state params */
	OSAL_MEMSET(&dev_data->bus, 0, sizeof(dev_data->bus));
	dev_data->bus.target = DBG_BUS_TARGET_ID_INT_BUF;
	dev_data->bus.state = DBG_BUS_STATE_READY;
	dev_data->bus.one_shot_en = one_shot_en;
	dev_data->bus.hw_dwords = force_hw_dwords;
	dev_data->bus.grc_input_en = grc_input_en;
	dev_data->bus.unify_inputs = unify_inputs;
	dev_data->bus.num_enabled_blocks = grc_input_en ? 1 : 0;

	/* Init special DBG block */
	if (grc_input_en)
		SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);

	return DBG_STATUS_OK;
}
4959 
/* Configures the debug bus to stream its recording into a host PCI buffer of
 * buf_size_kb kilobytes (must be a multiple of PCI_PKT_SIZE_IN_BYTES).
 * Allocates the DMA buffer and programs the DBG block's PCI target registers.
 */
enum dbg_status ecore_dbg_bus_set_pci_output(struct ecore_hwfn *p_hwfn,
											 struct ecore_ptt *p_ptt,
											 u16 buf_size_kb)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	dma_addr_t pci_buf_phys_addr;
	void *pci_buf;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_pci_output: buf_size_kb = %d\n", buf_size_kb);

	/* A target may only be set once, and only after dbg_bus_reset */
	if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
		return DBG_STATUS_OUTPUT_ALREADY_SET;
	if (dev_data->bus.state != DBG_BUS_STATE_READY || dev_data->bus.pci_buf.size > 0)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;

	/* NOTE(review): target and size are written before the size is
	 * validated - on the INVALID_ARGS path below the bus state is left
	 * modified; presumably the caller must re-run dbg_bus_reset on any
	 * error, confirm.
	 */
	dev_data->bus.target = DBG_BUS_TARGET_ID_PCI;
	dev_data->bus.pci_buf.size = buf_size_kb * 1024;
	if (dev_data->bus.pci_buf.size % PCI_PKT_SIZE_IN_BYTES)
		return DBG_STATUS_INVALID_ARGS;

	pci_buf = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &pci_buf_phys_addr, dev_data->bus.pci_buf.size);
	if (!pci_buf)
		return DBG_STATUS_PCI_BUF_ALLOC_FAILED;

	OSAL_MEMCPY(&dev_data->bus.pci_buf.phys_addr, &pci_buf_phys_addr, sizeof(pci_buf_phys_addr));

	/* Store the virtual address split into lo/hi dwords */
	dev_data->bus.pci_buf.virt_addr.lo = (u32)((u64)(osal_uintptr_t)pci_buf);
	dev_data->bus.pci_buf.virt_addr.hi = (u32)((u64)(osal_uintptr_t)pci_buf >> 32);

	/* Program the DBG block to stream into the PCI buffer */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB, dev_data->bus.pci_buf.phys_addr.lo);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB, dev_data->bus.pci_buf.phys_addr.hi);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, PCI_PKT_SIZE_IN_CHUNKS);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_SIZE, dev_data->bus.pci_buf.size / PCI_PKT_SIZE_IN_BYTES);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_FUNC_NUM, OPAQUE_FID(p_hwfn->rel_pf_id));
	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_LOGIC_ADDR, PCI_PHYS_ADDR_TYPE);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_REQ_CREDIT, PCI_REQ_CREDIT);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_PCI);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_PCI);

	return DBG_STATUS_OK;
}
5001 
/* Sets the debug bus output target to the network (NIG): recorded debug data
 * is sent out as Ethernet packets on the specified port, or exchanged with the
 * other engine.
 *
 * @param p_hwfn               - HW function context.
 * @param p_ptt                - PTT window used for register access.
 * @param port_id              - NIG port to transmit the debug packets on
 *                               (ignored when send_to_other_engine is set).
 * @param dest_addr_lo32       - destination MAC address, low 32 bits.
 * @param dest_addr_hi16       - destination MAC address, high 16 bits.
 * @param data_limit_size_kb   - data limit in KB (0 = no limit).
 * @param send_to_other_engine - forward the recorded data to the other engine
 *                               instead of this engine's NIG port.
 * @param rcv_from_other_engine- record data received from the other engine.
 *                               Mutually exclusive with send_to_other_engine.
 */
enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
											struct ecore_ptt *p_ptt,
											u8 port_id,
											u32 dest_addr_lo32,
											u16 dest_addr_hi16,
											u16 data_limit_size_kb,
											bool send_to_other_engine,
											bool rcv_from_other_engine)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_nw_output: port_id = %d, dest_addr_lo32 = 0x%x, dest_addr_hi16 = 0x%x, data_limit_size_kb = %d, send_to_other_engine = %d, rcv_from_other_engine = %d\n", port_id, dest_addr_lo32, dest_addr_hi16, data_limit_size_kb, send_to_other_engine, rcv_from_other_engine);

	/* The output target can be set only once (default is the internal
	 * buffer), and only while the debug block is still in READY state.
	 */
	if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
		return DBG_STATUS_OUTPUT_ALREADY_SET;
	if (dev_data->bus.state != DBG_BUS_STATE_READY)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	if (port_id >= s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id].num_ports || (send_to_other_engine && rcv_from_other_engine))
		return DBG_STATUS_INVALID_ARGS;

	dev_data->bus.target = DBG_BUS_TARGET_ID_NIG;
	dev_data->bus.rcv_from_other_engine = rcv_from_other_engine;

	ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_NIG);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_NIG);

	/* Cross-engine TX replaces normal port selection */
	if (send_to_other_engine)
		ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX);
	else
		ecore_wr(p_hwfn, p_ptt, NIG_REG_DEBUG_PORT, port_id);

	if (rcv_from_other_engine) {
		ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX);
	}
	else {

		/* Configure ethernet header of 14 bytes */
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_WIDTH, 0);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_7, dest_addr_lo32);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_6, (u32)SRC_MAC_ADDR_LO16 | ((u32)dest_addr_hi16 << 16));
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_5, SRC_MAC_ADDR_HI32);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_4, (u32)ETH_TYPE << 16);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, NIG_PKT_SIZE_IN_CHUNKS);
		/* 0 means no data limit */
		if (data_limit_size_kb)
			ecore_wr(p_hwfn, p_ptt, DBG_REG_NIG_DATA_LIMIT_SIZE, (data_limit_size_kb * 1024) / CHUNK_SIZE_IN_BYTES);
	}

	return DBG_STATUS_OK;
}
5051 
5052 static bool ecore_is_overlapping_enable_mask(struct ecore_hwfn *p_hwfn,
5053 									  u8 enable_mask,
5054 									  u8 right_shift)
5055 {
5056 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5057 	u8 curr_shifted_enable_mask, shifted_enable_mask;
5058 	u32 block_id;
5059 
5060 	shifted_enable_mask = SHR(enable_mask, VALUES_PER_CYCLE, right_shift);
5061 
5062 	if (dev_data->bus.num_enabled_blocks) {
5063 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5064 			struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
5065 
5066 			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5067 				continue;
5068 
5069 			curr_shifted_enable_mask =
5070 				SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5071 					VALUES_PER_CYCLE,
5072 					GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5073 			if (shifted_enable_mask & curr_shifted_enable_mask)
5074 				return true;
5075 		}
5076 	}
5077 
5078 	return false;
5079 }
5080 
5081 enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
5082 					   enum block_id block_id,
5083 					   u8 line_num,
5084 					   u8 enable_mask,
5085 					   u8 right_shift,
5086 					   u8 force_valid_mask,
5087 					   u8 force_frame_mask)
5088 {
5089 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5090 	struct block_defs *block = s_block_defs[block_id];
5091 	struct dbg_bus_block_data *block_bus;
5092 	struct dbg_bus_block *block_desc;
5093 
5094 	block_bus = &dev_data->bus.blocks[block_id];
5095 	block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
5096 
5097 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_block: block = %d, line_num = %d, enable_mask = 0x%x, right_shift = %d, force_valid_mask = 0x%x, force_frame_mask = 0x%x\n", block_id, line_num, enable_mask, right_shift, force_valid_mask, force_frame_mask);
5098 
5099 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5100 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5101 	if (block_id >= MAX_BLOCK_ID)
5102 		return DBG_STATUS_INVALID_ARGS;
5103 	if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5104 		return DBG_STATUS_BLOCK_ALREADY_ENABLED;
5105 	if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS ||
5106 		line_num >= NUM_DBG_LINES(block_desc) ||
5107 		!enable_mask ||
5108 		enable_mask > MAX_CYCLE_VALUES_MASK ||
5109 		force_valid_mask > MAX_CYCLE_VALUES_MASK ||
5110 		force_frame_mask > MAX_CYCLE_VALUES_MASK ||
5111 		right_shift > VALUES_PER_CYCLE - 1)
5112 		return DBG_STATUS_INVALID_ARGS;
5113 	if (dev_data->block_in_reset[block_id])
5114 		return DBG_STATUS_BLOCK_IN_RESET;
5115 	if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, enable_mask, right_shift))
5116 		return DBG_STATUS_INPUT_OVERLAP;
5117 
5118 	dev_data->bus.blocks[block_id].line_num = line_num;
5119 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, enable_mask);
5120 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT, right_shift);
5121 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK, force_valid_mask);
5122 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK, force_frame_mask);
5123 
5124 	dev_data->bus.num_enabled_blocks++;
5125 
5126 	return DBG_STATUS_OK;
5127 }
5128 
5129 enum dbg_status ecore_dbg_bus_enable_storm(struct ecore_hwfn *p_hwfn,
5130 										   enum dbg_storms storm,
5131 										   enum dbg_bus_storm_modes storm_mode)
5132 {
5133 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5134 
5135 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_storm: storm = %d, storm_mode = %d\n", storm, storm_mode);
5136 
5137 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5138 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5139 	if (dev_data->bus.hw_dwords >= 4)
5140 		return DBG_STATUS_HW_ONLY_RECORDING;
5141 	if (storm >= MAX_DBG_STORMS)
5142 		return DBG_STATUS_INVALID_ARGS;
5143 	if (storm_mode >= MAX_DBG_BUS_STORM_MODES)
5144 		return DBG_STATUS_INVALID_ARGS;
5145 	if (dev_data->bus.unify_inputs)
5146 		return DBG_STATUS_INVALID_ARGS;
5147 
5148 	if (dev_data->bus.storms[storm].enabled)
5149 		return DBG_STATUS_STORM_ALREADY_ENABLED;
5150 
5151 	dev_data->bus.storms[storm].enabled = true;
5152 	dev_data->bus.storms[storm].mode = (u8)storm_mode;
5153 	dev_data->bus.storms[storm].hw_id = dev_data->bus.num_enabled_storms;
5154 
5155 	dev_data->bus.num_enabled_storms++;
5156 
5157 	return DBG_STATUS_OK;
5158 }
5159 
5160 enum dbg_status ecore_dbg_bus_enable_timestamp(struct ecore_hwfn *p_hwfn,
5161 											   struct ecore_ptt *p_ptt,
5162 											   u8 valid_mask,
5163 											   u8 frame_mask,
5164 											   u32 tick_len)
5165 {
5166 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5167 
5168 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_timestamp: valid_mask = 0x%x, frame_mask = 0x%x, tick_len = %d\n", valid_mask, frame_mask, tick_len);
5169 
5170 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5171 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5172 	if (valid_mask > 0x7 || frame_mask > 0x7)
5173 		return DBG_STATUS_INVALID_ARGS;
5174 	if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, 0x1, 0))
5175 		return DBG_STATUS_INPUT_OVERLAP;
5176 
5177 	dev_data->bus.timestamp_input_en = true;
5178 	dev_data->bus.num_enabled_blocks++;
5179 
5180 	SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
5181 
5182 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, valid_mask);
5183 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_FRAME_EN, frame_mask);
5184 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_TICK, tick_len);
5185 
5186 	return DBG_STATUS_OK;
5187 }
5188 
5189 enum dbg_status ecore_dbg_bus_add_eid_range_sem_filter(struct ecore_hwfn *p_hwfn,
5190 													   enum dbg_storms storm_id,
5191 													   u8 min_eid,
5192 													   u8 max_eid)
5193 {
5194 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5195 	struct dbg_bus_storm_data *storm_bus;
5196 
5197 	storm_bus = &dev_data->bus.storms[storm_id];
5198 
5199 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_range_sem_filter: storm = %d, min_eid = 0x%x, max_eid = 0x%x\n", storm_id, min_eid, max_eid);
5200 
5201 	if (storm_id >= MAX_DBG_STORMS)
5202 		return DBG_STATUS_INVALID_ARGS;
5203 	if (min_eid > max_eid)
5204 		return DBG_STATUS_INVALID_ARGS;
5205 	if (!storm_bus->enabled)
5206 		return DBG_STATUS_STORM_NOT_ENABLED;
5207 
5208 	storm_bus->eid_filter_en = 1;
5209 	storm_bus->eid_range_not_mask = 1;
5210 	storm_bus->eid_filter_params.range.min = min_eid;
5211 	storm_bus->eid_filter_params.range.max = max_eid;
5212 
5213 	return DBG_STATUS_OK;
5214 }
5215 
5216 enum dbg_status ecore_dbg_bus_add_eid_mask_sem_filter(struct ecore_hwfn *p_hwfn,
5217 													  enum dbg_storms storm_id,
5218 													  u8 eid_val,
5219 													  u8 eid_mask)
5220 {
5221 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5222 	struct dbg_bus_storm_data *storm_bus;
5223 
5224 	storm_bus = &dev_data->bus.storms[storm_id];
5225 
5226 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_mask_sem_filter: storm = %d, eid_val = 0x%x, eid_mask = 0x%x\n", storm_id, eid_val, eid_mask);
5227 
5228 	if (storm_id >= MAX_DBG_STORMS)
5229 		return DBG_STATUS_INVALID_ARGS;
5230 	if (!storm_bus->enabled)
5231 		return DBG_STATUS_STORM_NOT_ENABLED;
5232 
5233 	storm_bus->eid_filter_en = 1;
5234 	storm_bus->eid_range_not_mask = 0;
5235 	storm_bus->eid_filter_params.mask.val = eid_val;
5236 	storm_bus->eid_filter_params.mask.mask = eid_mask;
5237 
5238 	return DBG_STATUS_OK;
5239 }
5240 
5241 enum dbg_status ecore_dbg_bus_add_cid_sem_filter(struct ecore_hwfn *p_hwfn,
5242 												 enum dbg_storms storm_id,
5243 												 u32 cid)
5244 {
5245 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5246 	struct dbg_bus_storm_data *storm_bus;
5247 
5248 	storm_bus = &dev_data->bus.storms[storm_id];
5249 
5250 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_cid_sem_filter: storm = %d, cid = 0x%x\n", storm_id, cid);
5251 
5252 	if (storm_id >= MAX_DBG_STORMS)
5253 		return DBG_STATUS_INVALID_ARGS;
5254 	if (!storm_bus->enabled)
5255 		return DBG_STATUS_STORM_NOT_ENABLED;
5256 
5257 	storm_bus->cid_filter_en = 1;
5258 	storm_bus->cid = cid;
5259 
5260 	return DBG_STATUS_OK;
5261 }
5262 
5263 enum dbg_status ecore_dbg_bus_enable_filter(struct ecore_hwfn *p_hwfn,
5264 											struct ecore_ptt *p_ptt,
5265 											enum block_id block_id,
5266 											u8 const_msg_len)
5267 {
5268 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5269 
5270 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_filter: block = %d, const_msg_len = %d\n", block_id, const_msg_len);
5271 
5272 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5273 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5274 	if (dev_data->bus.filter_en)
5275 		return DBG_STATUS_FILTER_ALREADY_ENABLED;
5276 	if (block_id >= MAX_BLOCK_ID)
5277 		return DBG_STATUS_INVALID_ARGS;
5278 	if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5279 		return DBG_STATUS_BLOCK_NOT_ENABLED;
5280 	if (!dev_data->bus.unify_inputs)
5281 		return DBG_STATUS_FILTER_BUG;
5282 
5283 	dev_data->bus.filter_en = true;
5284 	dev_data->bus.next_constraint_id = 0;
5285 	dev_data->bus.adding_filter = true;
5286 
5287 	/* HW ID is set to 0 due to required unifyInputs */
5288 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ID_NUM, 0);
5289 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH_ENABLE, const_msg_len > 0 ? 1 : 0);
5290 	if (const_msg_len > 0)
5291 		ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH, const_msg_len - 1);
5292 
5293 	return DBG_STATUS_OK;
5294 }
5295 
/* Enables the recording trigger.
 *
 * @param rec_pre_trigger     - record data before the trigger fires.
 * @param pre_chunks          - number of chunks to record pre-trigger
 *                              (0 = record from the start; must be smaller
 *                              than the internal buffer).
 * @param rec_post_trigger    - record data after the trigger fires.
 * @param post_cycles         - number of cycles to record post-trigger
 *                              (0 = record forever).
 * @param filter_pre_trigger  - apply the filter to pre-trigger data.
 * @param filter_post_trigger - apply the filter to post-trigger data.
 */
enum dbg_status ecore_dbg_bus_enable_trigger(struct ecore_hwfn *p_hwfn,
											 struct ecore_ptt *p_ptt,
											 bool rec_pre_trigger,
											 u8 pre_chunks,
											 bool rec_post_trigger,
											 u32 post_cycles,
											 bool filter_pre_trigger,
											 bool filter_post_trigger)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum dbg_bus_post_trigger_types post_trigger_type;
	enum dbg_bus_pre_trigger_types pre_trigger_type;
	struct dbg_bus_data *bus = &dev_data->bus;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_trigger: rec_pre_trigger = %d, pre_chunks = %d, rec_post_trigger = %d, post_cycles = %d, filter_pre_trigger = %d, filter_post_trigger = %d\n", rec_pre_trigger, pre_chunks, rec_post_trigger, post_cycles, filter_pre_trigger, filter_post_trigger);

	if (bus->state != DBG_BUS_STATE_READY)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	if (bus->trigger_en)
		return DBG_STATUS_TRIGGER_ALREADY_ENABLED;
	if (rec_pre_trigger && pre_chunks >= INT_BUF_SIZE_IN_CHUNKS)
		return DBG_STATUS_INVALID_ARGS;

	bus->trigger_en = true;
	bus->filter_pre_trigger = filter_pre_trigger;
	bus->filter_post_trigger = filter_post_trigger;

	if (rec_pre_trigger) {
		/* pre_chunks == 0 means record everything from the start */
		pre_trigger_type = pre_chunks ? DBG_BUS_PRE_TRIGGER_NUM_CHUNKS : DBG_BUS_PRE_TRIGGER_START_FROM_ZERO;
		ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS, pre_chunks);
	}
	else {
		pre_trigger_type = DBG_BUS_PRE_TRIGGER_DROP;
	}

	if (rec_post_trigger) {
		post_trigger_type = DBG_BUS_POST_TRIGGER_RECORD;
		/* post_cycles == 0 means record forever (0xffffffff cycles) */
		ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES, post_cycles ? post_cycles : 0xffffffff);
	}
	else {
		post_trigger_type = DBG_BUS_POST_TRIGGER_DROP;
	}

	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE, pre_trigger_type);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE, post_trigger_type);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 1);

	return DBG_STATUS_OK;
}
5345 
5346 enum dbg_status ecore_dbg_bus_add_trigger_state(struct ecore_hwfn *p_hwfn,
5347 												struct ecore_ptt *p_ptt,
5348 												enum block_id block_id,
5349 												u8 const_msg_len,
5350 												u16 count_to_next)
5351 {
5352 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5353 	struct dbg_bus_data *bus = &dev_data->bus;
5354 	struct dbg_bus_block_data *block_bus;
5355 	u8 reg_offset;
5356 
5357 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_trigger_state: block = %d, const_msg_len = %d, count_to_next = %d\n", block_id, const_msg_len, count_to_next);
5358 
5359 	block_bus = &bus->blocks[block_id];
5360 
5361 	if (!bus->trigger_en)
5362 		return DBG_STATUS_TRIGGER_NOT_ENABLED;
5363 	if (bus->next_trigger_state == MAX_TRIGGER_STATES)
5364 		return DBG_STATUS_TOO_MANY_TRIGGER_STATES;
5365 	if (block_id >= MAX_BLOCK_ID)
5366 		return DBG_STATUS_INVALID_ARGS;
5367 	if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5368 		return DBG_STATUS_BLOCK_NOT_ENABLED;
5369 	if (!count_to_next)
5370 		return DBG_STATUS_INVALID_ARGS;
5371 
5372 	bus->next_constraint_id = 0;
5373 	bus->adding_filter = false;
5374 
5375 	/* Store block's shifted enable mask */
5376 	SET_FIELD(bus->trigger_states[dev_data->bus.next_trigger_state].data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK, SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5377 					   VALUES_PER_CYCLE,
5378 					   GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT)));
5379 
5380 	/* Set trigger state registers */
5381 	reg_offset = bus->next_trigger_state * BYTES_IN_DWORD;
5382 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 + reg_offset, const_msg_len > 0 ? 1 : 0);
5383 	if (const_msg_len > 0)
5384 		ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 + reg_offset, const_msg_len - 1);
5385 
5386 	/* Set trigger set registers */
5387 	reg_offset = bus->next_trigger_state * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5388 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_COUNT_0 + reg_offset, count_to_next);
5389 
5390 	/* Set next state to final state, and overwrite previous next state
5391 	 * (if any).
5392 	 */
5393 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, MAX_TRIGGER_STATES);
5394 	if (bus->next_trigger_state > 0) {
5395 		reg_offset = (bus->next_trigger_state - 1) * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5396 		ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, bus->next_trigger_state);
5397 	}
5398 
5399 	bus->next_trigger_state++;
5400 
5401 	return DBG_STATUS_OK;
5402 }
5403 
/* Adds a constraint to the current filter (if adding_filter is set) or to the
 * last-added trigger state. Up to MAX_CONSTRAINTS constraints are supported;
 * the first added constraint causes the remaining slots to be filled with
 * always-matching dummy constraints.
 *
 * @param constraint_op         - comparison operation (EQ/NE/LT/...).
 * @param data_val              - value to compare the recorded dword against.
 * @param data_mask             - for EQ/NE: mask of bits to ignore in the
 *                                comparison; for other (cyclic/range) ops: a
 *                                single contiguous run of set bits selecting
 *                                the compared field.
 * @param compare_frame         - if set, also compare the frame bit (EQ/NE
 *                                only).
 * @param frame_bit             - expected frame bit value (0/1).
 * @param cycle_offset          - offset, in cycles, of the compared dword.
 * @param dword_offset_in_cycle - dword index within the cycle (0-3).
 * @param is_mandatory          - if set, this constraint must match (AND
 *                                semantics); otherwise it is optional.
 */
enum dbg_status ecore_dbg_bus_add_constraint(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 enum dbg_bus_constraint_ops constraint_op,
			 u32 data_val,
			 u32 data_mask,
			 bool compare_frame,
			 u8 frame_bit,
			 u8 cycle_offset,
			 u8 dword_offset_in_cycle,
			 bool is_mandatory)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	u16 dword_offset, range = 0;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_constraint: op = %d, data_val = 0x%x, data_mask = 0x%x, compare_frame = %d, frame_bit = %d, cycle_offset = %d, dword_offset_in_cycle = %d, is_mandatory = %d\n", constraint_op, data_val, data_mask, compare_frame, frame_bit, cycle_offset, dword_offset_in_cycle, is_mandatory);

	/* A constraint belongs either to the filter or to a trigger state, so
	 * one of them must have been enabled first.
	 */
	if (!bus->filter_en && !dev_data->bus.trigger_en)
		return DBG_STATUS_CANT_ADD_CONSTRAINT;
	if (bus->trigger_en && !bus->adding_filter && !bus->next_trigger_state)
		return DBG_STATUS_CANT_ADD_CONSTRAINT;
	if (bus->next_constraint_id >= MAX_CONSTRAINTS)
		return DBG_STATUS_TOO_MANY_CONSTRAINTS;
	if (constraint_op >= MAX_DBG_BUS_CONSTRAINT_OPS || frame_bit > 1 || dword_offset_in_cycle > 3 || (bus->adding_filter && cycle_offset > 3))
		return DBG_STATUS_INVALID_ARGS;
	/* Frame comparison is only supported for EQ/NE operations */
	if (compare_frame &&
		constraint_op != DBG_BUS_CONSTRAINT_OP_EQ &&
		constraint_op != DBG_BUS_CONSTRAINT_OP_NE)
		return DBG_STATUS_INVALID_ARGS;

	dword_offset = cycle_offset * VALUES_PER_CYCLE + dword_offset_in_cycle;

	if (!bus->adding_filter) {
		u8 curr_trigger_state_id = bus->next_trigger_state - 1;
		struct dbg_bus_trigger_state_data *trigger_state;

		trigger_state = &bus->trigger_states[curr_trigger_state_id];

		/* Check if the selected dword is enabled in the block */
		if (!(GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK) & (u8)(1 << dword_offset_in_cycle)))
			return DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET;

		/* Add selected dword to trigger state's dword mask */
		SET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK, GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) | (u8)(1 << dword_offset_in_cycle));
	}

	/* Prepare data mask and range */
	if (constraint_op == DBG_BUS_CONSTRAINT_OP_EQ ||
		constraint_op == DBG_BUS_CONSTRAINT_OP_NE) {
		/* NOTE(review): the HW appears to expect the inverted mask for
		 * EQ/NE (caller marks bits to ignore) - confirm against the HW
		 * spec.
		 */
		data_mask = ~data_mask;
	}
	else {
		u8 lsb, width;

		/* Extract lsb and width from mask */
		if (!data_mask)
			return DBG_STATUS_INVALID_ARGS;

		/* Skip trailing zero bits, then count the run of set bits. Any
		 * bits remaining after the run mean the mask was not a single
		 * contiguous run, which is invalid for range ops.
		 */
		for (lsb = 0; lsb < 32 && !(data_mask & 1); lsb++, data_mask >>= 1);
		for (width = 0;
		width < 32 - lsb && (data_mask & 1);
			width++, data_mask >>= 1) {}
			if (data_mask)
				return DBG_STATUS_INVALID_ARGS;
		/* Encode the field as (lsb, width - 1) for the HW */
		range = (lsb << 5) | (width - 1);
	}

	/* Add constraint */
	ecore_bus_set_constraint(p_hwfn, p_ptt, dev_data->bus.adding_filter ? 1 : 0,
		dev_data->bus.next_constraint_id,
		s_constraint_op_defs[constraint_op].hw_op_val,
		data_val, data_mask, frame_bit,
		compare_frame ? 0 : 1, dword_offset, range,
		s_constraint_op_defs[constraint_op].is_cyclic ? 1 : 0,
		is_mandatory ? 1 : 0);

	/* If first constraint, fill other 3 constraints with dummy constraints
	 * that always match (using the same offset).
	 */
	if (!dev_data->bus.next_constraint_id) {
		u8 i;

		for (i = 1; i < MAX_CONSTRAINTS; i++)
			ecore_bus_set_constraint(p_hwfn, p_ptt, bus->adding_filter ? 1 : 0,
				i, DBG_BUS_CONSTRAINT_OP_EQ, 0, 0xffffffff,
				0, 1, dword_offset, 0, 0, 1);
	}

	bus->next_constraint_id++;

	return DBG_STATUS_OK;
}
5496 
5497 /* Configure the DBG block client mask */
5498 static void ecore_config_dbg_block_client_mask(struct ecore_hwfn *p_hwfn,
5499 										struct ecore_ptt *p_ptt)
5500 {
5501 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5502 	struct dbg_bus_data *bus = &dev_data->bus;
5503 	u32 block_id, client_mask = 0;
5504 	u8 storm_id;
5505 
5506 	/* Update client mask for Storm inputs */
5507 	if (bus->num_enabled_storms)
5508 		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5509 			struct storm_defs *storm = &s_storm_defs[storm_id];
5510 
5511 			if (bus->storms[storm_id].enabled)
5512 				client_mask |= (1 << storm->dbg_client_id[dev_data->chip_id]);
5513 		}
5514 
5515 	/* Update client mask for block inputs */
5516 	if (bus->num_enabled_blocks) {
5517 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5518 			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5519 			struct block_defs *block = s_block_defs[block_id];
5520 
5521 			if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) && block_id != BLOCK_DBG)
5522 				client_mask |= (1 << block->dbg_client_id[dev_data->chip_id]);
5523 		}
5524 	}
5525 
5526 	/* Update client mask for GRC input */
5527 	if (bus->grc_input_en)
5528 		client_mask |= (1 << DBG_BUS_CLIENT_CPU);
5529 
5530 	/* Update client mask for timestamp input */
5531 	if (bus->timestamp_input_en)
5532 		client_mask |= (1 << DBG_BUS_CLIENT_TIMESTAMP);
5533 
5534 	ecore_bus_enable_clients(p_hwfn, p_ptt, client_mask);
5535 }
5536 
/* Configures the DBG block framing mode based on the number of HW dwords per
 * cycle. When recording block inputs, the number of HW dwords (4 for 128-bit
 * lines, 8 for 256-bit lines) is derived from the enabled blocks' debug line
 * descriptors; all enabled blocks must agree on the same width.
 */
static enum dbg_status ecore_config_dbg_block_framing_mode(struct ecore_hwfn *p_hwfn,
													struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	enum dbg_bus_frame_modes dbg_framing_mode;
	u32 block_id;

	if (!bus->hw_dwords && bus->num_enabled_blocks) {
		struct dbg_bus_line *line_desc;
		u8 hw_dwords;

		/* Choose either 4 HW dwords (128-bit mode) or 8 HW dwords
		 * (256-bit mode).
		 */
		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];

			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
				continue;

			line_desc = get_dbg_bus_line_desc(p_hwfn, (enum block_id)block_id);
			hw_dwords = line_desc && GET_FIELD(line_desc->data, DBG_BUS_LINE_IS_256B) ? 8 : 4;

			/* All enabled blocks must use the same line width;
			 * bus->hw_dwords holds the width chosen by a previous
			 * iteration (0 on the first enabled block).
			 */
			if (bus->hw_dwords > 0 && bus->hw_dwords != hw_dwords)
				return DBG_STATUS_NON_MATCHING_LINES;

			/* The DBG block doesn't support triggers and
			 * filters on 256b debug lines.
			 */
			if (hw_dwords == 8 && (bus->trigger_en || bus->filter_en))
				return DBG_STATUS_NO_FILTER_TRIGGER_64B;

			bus->hw_dwords = hw_dwords;
		}
	}

	/* Map the HW dword count to a framing mode (0 dwords = Storm-only
	 * recording, 4 Storm dwords per cycle).
	 */
	switch (bus->hw_dwords) {
	case 0: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
	case 4: dbg_framing_mode = DBG_BUS_FRAME_MODE_4HW_0ST; break;
	case 8: dbg_framing_mode = DBG_BUS_FRAME_MODE_8HW_0ST; break;
	default: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
	}
	ecore_bus_set_framing_mode(p_hwfn, p_ptt, dbg_framing_mode);

	return DBG_STATUS_OK;
}
5585 
/* Configures the DBG block Storm data: verifies the SEMI sync FIFOs are
 * empty, programs the Storm HW ID mask and stall behavior, and fills the
 * calendar slots with the enabled Storms in round-robin order.
 *
 * NOTE: the caller must ensure at least one Storm is enabled (see
 * ecore_dbg_bus_start), otherwise the calendar scan loop never terminates.
 */
static enum dbg_status ecore_config_storm_inputs(struct ecore_hwfn *p_hwfn,
										  struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	u8 storm_id, i, next_storm_id = 0;
	u32 storm_id_mask = 0;

	/* Check if SEMI sync FIFO is empty */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
		struct storm_defs *storm = &s_storm_defs[storm_id];

		if (storm_bus->enabled && !ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr))
			return DBG_STATUS_SEMI_FIFO_NOT_EMPTY;
	}

	/* Build the Storm HW ID mask: HW_ID_BITS bits per Storm */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];

		if (storm_bus->enabled)
			storm_id_mask |= (storm_bus->hw_id << (storm_id * HW_ID_BITS));
	}

	ecore_wr(p_hwfn, p_ptt, DBG_REG_STORM_ID_NUM, storm_id_mask);

	/* Disable storm stall if recording to internal buffer in one-shot */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_NO_GRANT_ON_FULL, (dev_data->bus.target == DBG_BUS_TARGET_ID_INT_BUF && bus->one_shot_en) ? 0 : 1);

	/* Configure calendar */
	for (i = 0; i < NUM_CALENDAR_SLOTS; i++, next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS) {

		/* Find next enabled Storm */
		for (; !dev_data->bus.storms[next_storm_id].enabled; next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS);

		/* Configure calendar slot */
		ecore_wr(p_hwfn, p_ptt, DBG_REG_CALENDAR_SLOT0 + DWORDS_TO_BYTES(i), next_storm_id);
	}

	return DBG_STATUS_OK;
}
5628 
5629 /* Assign HW ID to each dword/qword:
5630  * if the inputs are unified, HW ID 0 is assigned to all dwords/qwords.
5631  * Otherwise, we would like to assign a different HW ID to each dword, to avoid
5632  * data synchronization issues. however, we need to check if there is a trigger
5633  * state for which more than one dword has a constraint. if there is, we cannot
5634  * assign a different HW ID to each dword (since a trigger state has a single
5635  * HW ID), so we assign a different HW ID to each block.
5636  */
static void ecore_assign_hw_ids(struct ecore_hwfn *p_hwfn,
						 u8 hw_ids[VALUES_PER_CYCLE])
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	bool hw_id_per_dword = true;
	u8 val_id, state_id;
	u32 block_id;

	/* Default: all dwords mapped to HW ID 0 */
	OSAL_MEMSET(hw_ids, 0, VALUES_PER_CYCLE);

	/* Unified inputs always use HW ID 0 for all dwords */
	if (bus->unify_inputs)
		return;

	/* If any trigger state constrains more than one dword, a per-dword
	 * HW ID assignment is impossible (a trigger state has a single HW ID),
	 * so fall back to per-block assignment.
	 */
	if (bus->trigger_en) {
		for (state_id = 0; state_id < bus->next_trigger_state && hw_id_per_dword; state_id++) {
			u8 num_dwords = 0;

			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
				if (GET_FIELD(bus->trigger_states[state_id].data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id))
					num_dwords++;

			if (num_dwords > 1)
				hw_id_per_dword = false;
		}
	}

	if (hw_id_per_dword) {

		/* Assign a different HW ID for each dword */
		for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
			hw_ids[val_id] = val_id;
	}
	else {
		u8 shifted_enable_mask, next_hw_id = 0;

		/* Assign HW IDs according to blocks enable /  */
		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];

			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
				continue;

			/* The first enabled block keeps HW ID 0, which all
			 * dwords already default to, so its mask scan can be
			 * skipped.
			 */
			block_bus->hw_id = next_hw_id++;
			if (!block_bus->hw_id)
				continue;

			shifted_enable_mask =
				SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
					VALUES_PER_CYCLE,
					GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));

			/* Map each dword recorded by this block to its HW ID */
			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
				if (shifted_enable_mask & (1 << val_id))
					hw_ids[val_id] = block_bus->hw_id;
		}
	}
}
5695 
/* Configures the DBG block HW blocks data: assigns HW IDs to the cycle
 * dwords, maps each trigger state to the HW ID of its constrained dword,
 * programs the HW ID mask register, and applies K2 PCIE workarounds.
 */
static void ecore_config_block_inputs(struct ecore_hwfn *p_hwfn,
							   struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	u8 hw_ids[VALUES_PER_CYCLE];
	u8 val_id, state_id;

	ecore_assign_hw_ids(p_hwfn, hw_ids);

	/* Assign a HW ID to each trigger state: the HW ID of the first dword
	 * in the state's constraint dword mask.
	 */
	if (dev_data->bus.trigger_en) {
		for (state_id = 0; state_id < bus->next_trigger_state; state_id++) {
			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++) {
				u8 state_data = bus->trigger_states[state_id].data;

				if (GET_FIELD(state_data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id)) {
					ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_ID_0 + state_id * BYTES_IN_DWORD, hw_ids[val_id]);
					break;
				}
			}
		}
	}

	/* Configure HW ID mask: HW_ID_BITS bits per dword */
	dev_data->bus.hw_id_mask = 0;
	for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
		bus->hw_id_mask |= (hw_ids[val_id] << (val_id * HW_ID_BITS));
	ecore_wr(p_hwfn, p_ptt, DBG_REG_HW_ID_NUM, bus->hw_id_mask);

	/* Configure additional K2 PCIE registers */
	if (dev_data->chip_id == CHIP_K2 &&
		(GET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) ||
			GET_FIELD(bus->blocks[BLOCK_PHY_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))) {
		ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_REPEAT_THRESHOLD_COUNT_K2_E5, 1);
		ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_FW_TRIGGER_ENABLE_K2_E5, 1);
	}
}
5735 
/* Starts debug bus recording. All inputs, output target, filters and triggers
 * must have been configured beforehand (while in READY state). On success the
 * bus state moves to RECORDING.
 *
 * The configuration steps below are order-sensitive: the DBG block is enabled
 * before the per-block debug lines and client mask are programmed, and Storms
 * are enabled last.
 */
enum dbg_status ecore_dbg_bus_start(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	enum dbg_bus_filter_types filter_type;
	enum dbg_status status;
	u32 block_id;
	u8 storm_id;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_start\n");

	if (bus->state != DBG_BUS_STATE_READY)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;

	/* Check if any input was enabled */
	if (!bus->num_enabled_storms &&
		!bus->num_enabled_blocks &&
		!bus->rcv_from_other_engine)
		return DBG_STATUS_NO_INPUT_ENABLED;

	/* Check if too many input types were enabled (storm+dbgmux) */
	if (bus->num_enabled_storms && bus->num_enabled_blocks)
		return DBG_STATUS_TOO_MANY_INPUTS;

	/* Configure framing mode */
	if ((status = ecore_config_dbg_block_framing_mode(p_hwfn, p_ptt)) != DBG_STATUS_OK)
		return status;

	/* Configure DBG block for Storm inputs */
	if (bus->num_enabled_storms)
		if ((status = ecore_config_storm_inputs(p_hwfn, p_ptt)) != DBG_STATUS_OK)
			return status;

	/* Configure DBG block for block inputs */
	if (bus->num_enabled_blocks)
		ecore_config_block_inputs(p_hwfn, p_ptt);

	/* Configure filter type: with a trigger, the filter can apply to the
	 * pre-trigger window, the post-trigger window, both, or neither.
	 */
	if (bus->filter_en) {
		if (bus->trigger_en) {
			if (bus->filter_pre_trigger)
				filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_ON : DBG_BUS_FILTER_TYPE_PRE;
			else
				filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_POST : DBG_BUS_FILTER_TYPE_OFF;
		}
		else {
			filter_type = DBG_BUS_FILTER_TYPE_ON;
		}
	}
	else {
		filter_type = DBG_BUS_FILTER_TYPE_OFF;
	}
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, filter_type);

	/* Restart timestamp */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP, 0);

	/* Enable debug block */
	ecore_bus_enable_dbg_block(p_hwfn, p_ptt, 1);

	/* Configure enabled blocks - must be done before the DBG block is
	 * enabled.
	 */
	if (dev_data->bus.num_enabled_blocks) {
		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
			/* The DBG block itself (timestamp input) needs no line config */
			if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) || block_id == BLOCK_DBG)
				continue;

			ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id,
				dev_data->bus.blocks[block_id].line_num,
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT),
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK),
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK));
		}
	}

	/* Configure client mask */
	ecore_config_dbg_block_client_mask(p_hwfn, p_ptt);

	/* Configure enabled Storms - must be done after the DBG block is
	 * enabled.
	 */
	if (dev_data->bus.num_enabled_storms)
		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
			if (dev_data->bus.storms[storm_id].enabled)
				ecore_bus_enable_storm(p_hwfn, p_ptt, (enum dbg_storms)storm_id, filter_type);

	dev_data->bus.state = DBG_BUS_STATE_RECORDING;

	return DBG_STATUS_OK;
}
5829 
/* Stops Debug Bus recording.
 * The bus must be in RECORDING state; on success it moves to STOPPED.
 * Returns DBG_STATUS_DATA_DIDNT_TRIGGER if a trigger was enabled but did not
 * pass through all of its states.
 */
enum dbg_status ecore_dbg_bus_stop(struct ecore_hwfn *p_hwfn,
								   struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	enum dbg_status status = DBG_STATUS_OK;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_stop\n");

	if (bus->state != DBG_BUS_STATE_RECORDING)
		return DBG_STATUS_RECORDING_NOT_STARTED;

	/* Disable all bus inputs (Storms and blocks) */
	status = ecore_bus_disable_inputs(p_hwfn, p_ptt, true);
	if (status != DBG_STATUS_OK)
		return status;

	/* Write the CPU timeout register - presumably forces pending recorded
	 * data to be flushed out (the FLUSH_DELAY_MS sleep below waits for it);
	 * TODO confirm against the DBG block spec.
	 */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_CPU_TIMEOUT, 1);

	OSAL_MSLEEP(FLUSH_DELAY_MS);

	/* Disable the DBG block itself */
	ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);

	/* Check if trigger worked */
	if (bus->trigger_en) {
		u32 trigger_state = ecore_rd(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATUS_CUR_STATE);

		/* The trigger completed only if it reached its final state */
		if (trigger_state != MAX_TRIGGER_STATES)
			return DBG_STATUS_DATA_DIDNT_TRIGGER;
	}

	bus->state = DBG_BUS_STATE_STOPPED;

	return status;
}
5864 
5865 enum dbg_status ecore_dbg_bus_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
5866 												struct ecore_ptt *p_ptt,
5867 												u32 *buf_size)
5868 {
5869 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5870 	struct dbg_bus_data *bus = &dev_data->bus;
5871 	enum dbg_status status;
5872 
5873 	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
5874 
5875 	*buf_size = 0;
5876 
5877 	if (status != DBG_STATUS_OK)
5878 		return status;
5879 
5880 	/* Add dump header */
5881 	*buf_size = (u32)ecore_bus_dump_hdr(p_hwfn, p_ptt, OSAL_NULL, false);
5882 
5883 	switch (bus->target) {
5884 	case DBG_BUS_TARGET_ID_INT_BUF:
5885 		*buf_size += INT_BUF_SIZE_IN_DWORDS; break;
5886 	case DBG_BUS_TARGET_ID_PCI:
5887 		*buf_size += BYTES_TO_DWORDS(bus->pci_buf.size); break;
5888 	default:
5889 		break;
5890 	}
5891 
5892 	/* Dump last section */
5893 	*buf_size += ecore_dump_last_section(OSAL_NULL, 0, false);
5894 
5895 	return DBG_STATUS_OK;
5896 }
5897 
5898 enum dbg_status ecore_dbg_bus_dump(struct ecore_hwfn *p_hwfn,
5899 								   struct ecore_ptt *p_ptt,
5900 								   u32 *dump_buf,
5901 								   u32 buf_size_in_dwords,
5902 								   u32 *num_dumped_dwords)
5903 {
5904 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5905 	u32 min_buf_size_in_dwords, block_id, offset = 0;
5906 	struct dbg_bus_data *bus = &dev_data->bus;
5907 	enum dbg_status status;
5908 	u8 storm_id;
5909 
5910 	*num_dumped_dwords = 0;
5911 
5912 	status = ecore_dbg_bus_get_dump_buf_size(p_hwfn, p_ptt, &min_buf_size_in_dwords);
5913 	if (status != DBG_STATUS_OK)
5914 		return status;
5915 
5916 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_dump: dump_buf = 0x%p, buf_size_in_dwords = %d\n", dump_buf, buf_size_in_dwords);
5917 
5918 	if (bus->state != DBG_BUS_STATE_RECORDING && bus->state != DBG_BUS_STATE_STOPPED)
5919 		return DBG_STATUS_RECORDING_NOT_STARTED;
5920 
5921 	if (bus->state == DBG_BUS_STATE_RECORDING) {
5922 		enum dbg_status stop_state = ecore_dbg_bus_stop(p_hwfn, p_ptt);
5923 		if (stop_state != DBG_STATUS_OK)
5924 			return stop_state;
5925 	}
5926 
5927 	if (buf_size_in_dwords < min_buf_size_in_dwords)
5928 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5929 
5930 	if (bus->target == DBG_BUS_TARGET_ID_PCI && !bus->pci_buf.size)
5931 		return DBG_STATUS_PCI_BUF_NOT_ALLOCATED;
5932 
5933 	/* Dump header */
5934 	offset += ecore_bus_dump_hdr(p_hwfn, p_ptt, dump_buf + offset, true);
5935 
5936 	/* Dump recorded data */
5937 	if (bus->target != DBG_BUS_TARGET_ID_NIG) {
5938 		u32 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, dump_buf + offset, true);
5939 
5940 		if (!recorded_dwords)
5941 			return DBG_STATUS_NO_DATA_RECORDED;
5942 		if (recorded_dwords % CHUNK_SIZE_IN_DWORDS)
5943 			return DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED;
5944 		offset += recorded_dwords;
5945 	}
5946 
5947 	/* Dump last section */
5948 	offset += ecore_dump_last_section(dump_buf, offset, true);
5949 
5950 	/* If recorded to PCI buffer - free the buffer */
5951 	ecore_bus_free_pci_buf(p_hwfn);
5952 
5953 	/* Clear debug bus parameters */
5954 	bus->state = DBG_BUS_STATE_IDLE;
5955 	bus->num_enabled_blocks = 0;
5956 	bus->num_enabled_storms = 0;
5957 	bus->filter_en = bus->trigger_en = 0;
5958 
5959 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++)
5960 		SET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0);
5961 
5962 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5963 		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5964 
5965 		storm_bus->enabled = false;
5966 		storm_bus->eid_filter_en = storm_bus->cid_filter_en = 0;
5967 	}
5968 
5969 	*num_dumped_dwords = offset;
5970 
5971 	return DBG_STATUS_OK;
5972 }
5973 
5974 enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
5975 									 enum dbg_grc_params grc_param,
5976 									 u32 val)
5977 {
5978 	int i;
5979 
5980 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
5981 
5982 	/* Initializes the GRC parameters (if not initialized). Needed in order
5983 	 * to set the default parameter values for the first time.
5984 	 */
5985 	ecore_dbg_grc_init_params(p_hwfn);
5986 
5987 	if (grc_param >= MAX_DBG_GRC_PARAMS)
5988 		return DBG_STATUS_INVALID_ARGS;
5989 	if (val < s_grc_param_defs[grc_param].min ||
5990 		val > s_grc_param_defs[grc_param].max)
5991 		return DBG_STATUS_INVALID_ARGS;
5992 
5993 	if (s_grc_param_defs[grc_param].is_preset) {
5994 
5995 		/* Preset param */
5996 
5997 		/* Disabling a preset is not allowed. Call
5998 		 * dbg_grc_set_params_default instead.
5999 		 */
6000 		if (!val)
6001 			return DBG_STATUS_INVALID_ARGS;
6002 
6003 		/* Update all params with the preset values */
6004 		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
6005 			u32 preset_val;
6006 
6007 			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
6008 				preset_val = s_grc_param_defs[i].exclude_all_preset_val;
6009 			else if (grc_param == DBG_GRC_PARAM_CRASH)
6010 				preset_val = s_grc_param_defs[i].crash_preset_val;
6011 			else
6012 				return DBG_STATUS_INVALID_ARGS;
6013 
6014 			ecore_grc_set_param(p_hwfn, (enum dbg_grc_params)i, preset_val);
6015 		}
6016 	}
6017 	else {
6018 
6019 		/* Regular param - set its value */
6020 		ecore_grc_set_param(p_hwfn, grc_param, val);
6021 	}
6022 
6023 	return DBG_STATUS_OK;
6024 }
6025 
6026 /* Assign default GRC param values */
6027 void ecore_dbg_grc_set_params_default(struct ecore_hwfn *p_hwfn)
6028 {
6029 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6030 	u32 i;
6031 
6032 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
6033 		dev_data->grc.param_val[i] = s_grc_param_defs[i].default_val[dev_data->chip_id];
6034 }
6035 
6036 enum dbg_status ecore_dbg_grc_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6037 												struct ecore_ptt *p_ptt,
6038 												u32 *buf_size)
6039 {
6040 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6041 
6042 	*buf_size = 0;
6043 
6044 	if (status != DBG_STATUS_OK)
6045 		return status;
6046 
6047 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
6048 		!s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6049 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6050 
6051 	return ecore_grc_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6052 }
6053 
6054 enum dbg_status ecore_dbg_grc_dump(struct ecore_hwfn *p_hwfn,
6055 								   struct ecore_ptt *p_ptt,
6056 								   u32 *dump_buf,
6057 								   u32 buf_size_in_dwords,
6058 								   u32 *num_dumped_dwords)
6059 {
6060 	u32 needed_buf_size_in_dwords;
6061 	enum dbg_status status;
6062 
6063 	*num_dumped_dwords = 0;
6064 
6065 	status = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6066 	if (status != DBG_STATUS_OK)
6067 		return status;
6068 
6069 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6070 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6071 
6072 	/* Doesn't do anything, needed for compile time asserts */
6073 	ecore_static_asserts();
6074 
6075 	/* GRC Dump */
6076 	status = ecore_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6077 
6078 	/* Reveret GRC params to their default */
6079 	ecore_dbg_grc_set_params_default(p_hwfn);
6080 
6081 	return status;
6082 }
6083 
6084 enum dbg_status ecore_dbg_idle_chk_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6085 													 struct ecore_ptt *p_ptt,
6086 													 u32 *buf_size)
6087 {
6088 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6089 	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
6090 	enum dbg_status status;
6091 
6092 	*buf_size = 0;
6093 
6094 	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6095 	if (status != DBG_STATUS_OK)
6096 		return status;
6097 
6098 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
6099 		!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
6100 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6101 
6102 	if (!idle_chk->buf_size_set) {
6103 		idle_chk->buf_size = ecore_idle_chk_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6104 		idle_chk->buf_size_set = true;
6105 	}
6106 
6107 	*buf_size = idle_chk->buf_size;
6108 
6109 	return DBG_STATUS_OK;
6110 }
6111 
6112 enum dbg_status ecore_dbg_idle_chk_dump(struct ecore_hwfn *p_hwfn,
6113 										struct ecore_ptt *p_ptt,
6114 										u32 *dump_buf,
6115 										u32 buf_size_in_dwords,
6116 										u32 *num_dumped_dwords)
6117 {
6118 	u32 needed_buf_size_in_dwords;
6119 	enum dbg_status status;
6120 
6121 	*num_dumped_dwords = 0;
6122 
6123 	status = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6124 	if (status != DBG_STATUS_OK)
6125 		return status;
6126 
6127 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6128 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6129 
6130 	/* Update reset state */
6131 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6132 
6133 	/* Idle Check Dump */
6134 	*num_dumped_dwords = ecore_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
6135 
6136 	/* Reveret GRC params to their default */
6137 	ecore_dbg_grc_set_params_default(p_hwfn);
6138 
6139 	return DBG_STATUS_OK;
6140 }
6141 
6142 enum dbg_status ecore_dbg_mcp_trace_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6143 													  struct ecore_ptt *p_ptt,
6144 													  u32 *buf_size)
6145 {
6146 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6147 
6148 	*buf_size = 0;
6149 
6150 	if (status != DBG_STATUS_OK)
6151 		return status;
6152 
6153 	return ecore_mcp_trace_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6154 }
6155 
6156 enum dbg_status ecore_dbg_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
6157 										 struct ecore_ptt *p_ptt,
6158 										 u32 *dump_buf,
6159 										 u32 buf_size_in_dwords,
6160 										 u32 *num_dumped_dwords)
6161 {
6162 	u32 needed_buf_size_in_dwords;
6163 	enum dbg_status status;
6164 
6165 	status = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6166 	if (status != DBG_STATUS_OK && status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6167 		return status;
6168 
6169 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6170 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6171 
6172 	/* Update reset state */
6173 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6174 
6175 	/* Perform dump */
6176 	status = ecore_mcp_trace_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6177 
6178 	/* Reveret GRC params to their default */
6179 	ecore_dbg_grc_set_params_default(p_hwfn);
6180 
6181 	return status;
6182 }
6183 
6184 enum dbg_status ecore_dbg_reg_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6185 													 struct ecore_ptt *p_ptt,
6186 													 u32 *buf_size)
6187 {
6188 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6189 
6190 	*buf_size = 0;
6191 
6192 	if (status != DBG_STATUS_OK)
6193 		return status;
6194 
6195 	return ecore_reg_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6196 }
6197 
6198 enum dbg_status ecore_dbg_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
6199 										struct ecore_ptt *p_ptt,
6200 										u32 *dump_buf,
6201 										u32 buf_size_in_dwords,
6202 										u32 *num_dumped_dwords)
6203 {
6204 	u32 needed_buf_size_in_dwords;
6205 	enum dbg_status status;
6206 
6207 	*num_dumped_dwords = 0;
6208 
6209 	status = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6210 	if (status != DBG_STATUS_OK)
6211 		return status;
6212 
6213 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6214 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6215 
6216 	/* Update reset state */
6217 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6218 
6219 	status = ecore_reg_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6220 
6221 	/* Reveret GRC params to their default */
6222 	ecore_dbg_grc_set_params_default(p_hwfn);
6223 
6224 	return status;
6225 }
6226 
6227 enum dbg_status ecore_dbg_igu_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6228 													 struct ecore_ptt *p_ptt,
6229 													 u32 *buf_size)
6230 {
6231 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6232 
6233 	*buf_size = 0;
6234 
6235 	if (status != DBG_STATUS_OK)
6236 		return status;
6237 
6238 	return ecore_igu_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6239 }
6240 
6241 enum dbg_status ecore_dbg_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
6242 										struct ecore_ptt *p_ptt,
6243 										u32 *dump_buf,
6244 										u32 buf_size_in_dwords,
6245 										u32 *num_dumped_dwords)
6246 {
6247 	u32 needed_buf_size_in_dwords;
6248 	enum dbg_status status;
6249 
6250 	*num_dumped_dwords = 0;
6251 
6252 	status = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6253 	if (status != DBG_STATUS_OK)
6254 		return status;
6255 
6256 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6257 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6258 
6259 	/* Update reset state */
6260 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6261 
6262 	status = ecore_igu_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6263 
6264 	/* Reveret GRC params to their default */
6265 	ecore_dbg_grc_set_params_default(p_hwfn);
6266 
6267 	return status;
6268 }
6269 
6270 enum dbg_status ecore_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6271 																struct ecore_ptt *p_ptt,
6272 																u32 *buf_size)
6273 {
6274 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6275 
6276 	*buf_size = 0;
6277 
6278 	if (status != DBG_STATUS_OK)
6279 		return status;
6280 
6281 	return ecore_protection_override_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6282 }
6283 
6284 enum dbg_status ecore_dbg_protection_override_dump(struct ecore_hwfn *p_hwfn,
6285 												   struct ecore_ptt *p_ptt,
6286 												   u32 *dump_buf,
6287 												   u32 buf_size_in_dwords,
6288 												   u32 *num_dumped_dwords)
6289 {
6290 	u32 needed_buf_size_in_dwords;
6291 	enum dbg_status status;
6292 
6293 	*num_dumped_dwords = 0;
6294 
6295 	status = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6296 	if (status != DBG_STATUS_OK)
6297 		return status;
6298 
6299 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6300 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6301 
6302 	/* Update reset state */
6303 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6304 
6305 	status = ecore_protection_override_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6306 
6307 	/* Reveret GRC params to their default */
6308 	ecore_dbg_grc_set_params_default(p_hwfn);
6309 
6310 	return status;
6311 }
6312 
6313 enum dbg_status ecore_dbg_fw_asserts_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6314 													   struct ecore_ptt *p_ptt,
6315 													   u32 *buf_size)
6316 {
6317 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6318 
6319 	*buf_size = 0;
6320 
6321 	if (status != DBG_STATUS_OK)
6322 		return status;
6323 
6324 	/* Update reset state */
6325 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6326 
6327 	*buf_size = ecore_fw_asserts_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6328 
6329 	return DBG_STATUS_OK;
6330 }
6331 
6332 enum dbg_status ecore_dbg_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
6333 										  struct ecore_ptt *p_ptt,
6334 										  u32 *dump_buf,
6335 										  u32 buf_size_in_dwords,
6336 										  u32 *num_dumped_dwords)
6337 {
6338 	u32 needed_buf_size_in_dwords;
6339 	enum dbg_status status;
6340 
6341 	*num_dumped_dwords = 0;
6342 
6343 	status = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6344 	if (status != DBG_STATUS_OK)
6345 		return status;
6346 
6347 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6348 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6349 
6350 	*num_dumped_dwords = ecore_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
6351 
6352 	/* Reveret GRC params to their default */
6353 	ecore_dbg_grc_set_params_default(p_hwfn);
6354 
6355 	return DBG_STATUS_OK;
6356 }
6357 
/* Reads the attention registers of the given block and attention type, and
 * fills the results struct with one entry per register whose status is
 * non-zero. If clear_status is set, the status is read from the
 * clear-on-read address instead of the plain status address.
 */
enum dbg_status ecore_dbg_read_attn(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt,
									enum block_id block_id,
									enum dbg_attn_type attn_type,
									bool clear_status,
									struct dbg_attn_block_result *results)
{
	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
	u8 reg_idx, num_attn_regs, num_result_regs = 0;
	const struct dbg_attn_reg *attn_reg_arr;

	if (status != DBG_STATUS_OK)
		return status;

	/* All debug arrays used for attention parsing must have been provided */
	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	attn_reg_arr = ecore_get_block_attn_regs(block_id, attn_type, &num_attn_regs);

	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
		struct dbg_attn_reg_result *reg_result;
		u32 sts_addr, sts_val;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode - skip registers whose mode expression doesn't match
		 * the current configuration.
		 */
		eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
		modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
		if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
			continue;

		/* Mode match - read attention status register (addresses are
		 * stored in dwords, ecore_rd takes a byte address).
		 */
		sts_addr = DWORDS_TO_BYTES(clear_status ? reg_data->sts_clr_address : GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS));
		sts_val = ecore_rd(p_hwfn, p_ptt, sts_addr);
		if (!sts_val)
			continue;

		/* Non-zero attention status - add to results */
		reg_result = &results->reg_results[num_result_regs];
		SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
		SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN, GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
		reg_result->block_attn_offset = reg_data->block_attn_offset;
		reg_result->sts_val = sts_val;
		reg_result->mask_val = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->mask_address));
		num_result_regs++;
	}

	/* Fill in the result header */
	results->block_id = (u8)block_id;
	results->names_offset = ecore_get_block_attn_data(block_id, attn_type)->names_offset;
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);

	return DBG_STATUS_OK;
}
6413 
6414 enum dbg_status ecore_dbg_print_attn(struct ecore_hwfn *p_hwfn,
6415 									 struct dbg_attn_block_result *results)
6416 {
6417 	enum dbg_attn_type attn_type;
6418 	u8 num_regs, i;
6419 
6420 	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
6421 	attn_type = (enum dbg_attn_type)GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
6422 
6423 	for (i = 0; i < num_regs; i++) {
6424 		struct dbg_attn_reg_result *reg_result;
6425 		const char *attn_type_str;
6426 		u32 sts_addr;
6427 
6428 		reg_result = &results->reg_results[i];
6429 		attn_type_str = (attn_type == ATTN_TYPE_INTERRUPT ? "interrupt" : "parity");
6430 		sts_addr = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS);
6431 		DP_NOTICE(p_hwfn, false, "%s: address 0x%08x, status 0x%08x, mask 0x%08x\n", attn_type_str, sts_addr, reg_result->sts_val, reg_result->mask_val);
6432 	}
6433 
6434 	return DBG_STATUS_OK;
6435 }
6436 
6437 bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
6438 							 struct ecore_ptt *p_ptt,
6439 							 enum block_id block_id)
6440 {
6441 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6442 	struct block_defs *block = s_block_defs[block_id];
6443 	u32 reset_reg;
6444 
6445 	if (!block->has_reset_bit)
6446 		return false;
6447 
6448 	reset_reg = block->reset_reg;
6449 
6450 	return s_reset_regs_defs[reset_reg].exists[dev_data->chip_id] ?
6451 		!(ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[reset_reg].addr) & (1 << block->reset_bit_offset)) :	true;
6452 }
6453 
6454