1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File : ecore_dbg_fw_funcs.c
30  */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "bcm_osal.h"
35 #include "ecore.h"
36 #include "ecore_hw.h"
37 #include "ecore_mcp.h"
38 #include "spad_layout.h"
39 #include "nvm_map.h"
40 #include "reg_addr.h"
41 #include "ecore_hsi_common.h"
42 #include "ecore_hsi_debug_tools.h"
43 #include "mcp_public.h"
44 #include "nvm_map.h"
45 #ifndef USE_DBG_BIN_FILE
46 #include "ecore_dbg_values.h"
47 #endif
48 #include "ecore_dbg_fw_funcs.h"
49 
50 /* Memory groups enum */
51 enum mem_groups {
52 	MEM_GROUP_PXP_MEM,
53 	MEM_GROUP_DMAE_MEM,
54 	MEM_GROUP_CM_MEM,
55 	MEM_GROUP_QM_MEM,
56 	MEM_GROUP_DORQ_MEM,
57 	MEM_GROUP_BRB_RAM,
58 	MEM_GROUP_BRB_MEM,
59 	MEM_GROUP_PRS_MEM,
60 	MEM_GROUP_IOR,
61 	MEM_GROUP_BTB_RAM,
62 	MEM_GROUP_CONN_CFC_MEM,
63 	MEM_GROUP_TASK_CFC_MEM,
64 	MEM_GROUP_CAU_PI,
65 	MEM_GROUP_CAU_MEM,
66 	MEM_GROUP_PXP_ILT,
67 	MEM_GROUP_TM_MEM,
68 	MEM_GROUP_SDM_MEM,
69 	MEM_GROUP_PBUF,
70 	MEM_GROUP_RAM,
71 	MEM_GROUP_MULD_MEM,
72 	MEM_GROUP_BTB_MEM,
73 	MEM_GROUP_RDIF_CTX,
74 	MEM_GROUP_TDIF_CTX,
75 	MEM_GROUP_CFC_MEM,
76 	MEM_GROUP_IGU_MEM,
77 	MEM_GROUP_IGU_MSIX,
78 	MEM_GROUP_CAU_SB,
79 	MEM_GROUP_BMB_RAM,
80 	MEM_GROUP_BMB_MEM,
81 	MEM_GROUPS_NUM
82 };
83 
84 /* Memory groups names */
85 static const char* s_mem_group_names[] = {
86 	"PXP_MEM",
87 	"DMAE_MEM",
88 	"CM_MEM",
89 	"QM_MEM",
90 	"DORQ_MEM",
91 	"BRB_RAM",
92 	"BRB_MEM",
93 	"PRS_MEM",
94 	"IOR",
95 	"BTB_RAM",
96 	"CONN_CFC_MEM",
97 	"TASK_CFC_MEM",
98 	"CAU_PI",
99 	"CAU_MEM",
100 	"PXP_ILT",
101 	"TM_MEM",
102 	"SDM_MEM",
103 	"PBUF",
104 	"RAM",
105 	"MULD_MEM",
106 	"BTB_MEM",
107 	"RDIF_CTX",
108 	"TDIF_CTX",
109 	"CFC_MEM",
110 	"IGU_MEM",
111 	"IGU_MSIX",
112 	"CAU_SB",
113 	"BMB_RAM",
114 	"BMB_MEM",
115 };
116 
117 /* Idle check conditions */
118 
119 #ifndef __PREVENT_COND_ARR__
120 
121 static u32 cond5(const u32 *r, const u32 *imm) {
122 	return (((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]));
123 }
124 
125 static u32 cond7(const u32 *r, const u32 *imm) {
126 	return (((r[0] >> imm[0]) & imm[1]) != imm[2]);
127 }
128 
129 static u32 cond6(const u32 *r, const u32 *imm) {
130 	return ((r[0] & imm[0]) != imm[1]);
131 }
132 
133 static u32 cond9(const u32 *r, const u32 *imm) {
134 	return ((r[0] & imm[0]) >> imm[1]) != (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
135 }
136 
137 static u32 cond10(const u32 *r, const u32 *imm) {
138 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
139 }
140 
141 static u32 cond4(const u32 *r, const u32 *imm) {
142 	return ((r[0] & ~imm[0]) != imm[1]);
143 }
144 
145 static u32 cond0(const u32 *r, const u32 *imm) {
146 	return ((r[0] & ~r[1]) != imm[0]);
147 }
148 
149 static u32 cond1(const u32 *r, const u32 *imm) {
150 	return (r[0] != imm[0]);
151 }
152 
153 static u32 cond11(const u32 *r, const u32 *imm) {
154 	return (r[0] != r[1] && r[2] == imm[0]);
155 }
156 
157 static u32 cond12(const u32 *r, const u32 *imm) {
158 	return (r[0] != r[1] && r[2] > imm[0]);
159 }
160 
161 static u32 cond3(const u32 *r, const u32 OSAL_UNUSED *imm) {
162 	return (r[0] != r[1]);
163 }
164 
165 static u32 cond13(const u32 *r, const u32 *imm) {
166 	return (r[0] & imm[0]);
167 }
168 
169 static u32 cond8(const u32 *r, const u32 *imm) {
170 	return (r[0] < (r[1] - imm[0]));
171 }
172 
173 static u32 cond2(const u32 *r, const u32 *imm) {
174 	return (r[0] > imm[0]);
175 }
176 
177 /* Array of Idle Check conditions */
178 static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
179 	cond0,
180 	cond1,
181 	cond2,
182 	cond3,
183 	cond4,
184 	cond5,
185 	cond6,
186 	cond7,
187 	cond8,
188 	cond9,
189 	cond10,
190 	cond11,
191 	cond12,
192 	cond13,
193 };
194 
195 #endif /* __PREVENT_COND_ARR__ */
196 
197 
198 /******************************* Data Types **********************************/
199 
200 enum platform_ids {
201 	PLATFORM_ASIC,
202 	PLATFORM_EMUL_FULL,
203 	PLATFORM_EMUL_REDUCED,
204 	PLATFORM_FPGA,
205 	MAX_PLATFORM_IDS
206 };
207 
208 struct chip_platform_defs {
209 	u8 num_ports;
210 	u8 num_pfs;
211 	u8 num_vfs;
212 };
213 
214 /* Chip constant definitions */
215 struct chip_defs {
216 	const char *name;
217 	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
218 };
219 
220 /* Platform constant definitions */
221 struct platform_defs {
222 	const char *name;
223 	u32 delay_factor;
224 	u32 dmae_thresh;
225 	u32 log_thresh;
226 };
227 
228 /* Storm constant definitions.
229  * Addresses are in bytes, sizes are in quad-regs.
230  */
231 struct storm_defs {
232 	char letter;
233 	enum block_id block_id;
234 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
235 	bool has_vfc;
236 	u32 sem_fast_mem_addr;
237 	u32 sem_frame_mode_addr;
238 	u32 sem_slow_enable_addr;
239 	u32 sem_slow_mode_addr;
240 	u32 sem_slow_mode1_conf_addr;
241 	u32 sem_sync_dbg_empty_addr;
242 	u32 sem_slow_dbg_empty_addr;
243 	u32 cm_ctx_wr_addr;
244 	u32 cm_conn_ag_ctx_lid_size;
245 	u32 cm_conn_ag_ctx_rd_addr;
246 	u32 cm_conn_st_ctx_lid_size;
247 	u32 cm_conn_st_ctx_rd_addr;
248 	u32 cm_task_ag_ctx_lid_size;
249 	u32 cm_task_ag_ctx_rd_addr;
250 	u32 cm_task_st_ctx_lid_size;
251 	u32 cm_task_st_ctx_rd_addr;
252 };
253 
254 /* Block constant definitions */
255 struct block_defs {
256 	const char *name;
257 	bool exists[MAX_CHIP_IDS];
258 	bool associated_to_storm;
259 
260 	/* Valid only if associated_to_storm is true */
261 	u32 storm_id;
262 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
263 	u32 dbg_select_addr;
264 	u32 dbg_enable_addr;
265 	u32 dbg_shift_addr;
266 	u32 dbg_force_valid_addr;
267 	u32 dbg_force_frame_addr;
268 	bool has_reset_bit;
269 
270 	/* If true, block is taken out of reset before dump */
271 	bool unreset;
272 	enum dbg_reset_regs reset_reg;
273 
274 	/* Bit offset in reset register */
275 	u8 reset_bit_offset;
276 };
277 
278 /* Reset register definitions */
279 struct reset_reg_defs {
280 	u32 addr;
281 	bool exists[MAX_CHIP_IDS];
282 	u32 unreset_val[MAX_CHIP_IDS];
283 };
284 
285 /* Debug Bus Constraint operation constant definitions */
286 struct dbg_bus_constraint_op_defs {
287 	u8 hw_op_val;
288 	bool is_cyclic;
289 };
290 
291 /* Storm Mode definitions */
292 struct storm_mode_defs {
293 	const char *name;
294 	bool is_fast_dbg;
295 	u8 id_in_hw;
296 };
297 
298 struct grc_param_defs {
299 	u32 default_val[MAX_CHIP_IDS];
300 	u32 min;
301 	u32 max;
302 	bool is_preset;
303 	u32 exclude_all_preset_val;
304 	u32 crash_preset_val;
305 };
306 
307 /* address is in 128b units. Width is in bits. */
308 struct rss_mem_defs {
309 	const char *mem_name;
310 	const char *type_name;
311 	u32 addr;
312 	u32 entry_width;
313 	u32 num_entries[MAX_CHIP_IDS];
314 };
315 
316 struct vfc_ram_defs {
317 	const char *mem_name;
318 	const char *type_name;
319 	u32 base_row;
320 	u32 num_rows;
321 };
322 
323 struct big_ram_defs {
324 	const char *instance_name;
325 	enum mem_groups mem_group_id;
326 	enum mem_groups ram_mem_group_id;
327 	enum dbg_grc_params grc_param;
328 	u32 addr_reg_addr;
329 	u32 data_reg_addr;
330 	u32 is_256b_reg_addr;
331 	u32 is_256b_bit_offset[MAX_CHIP_IDS];
332 	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
333 };
334 
335 struct phy_defs {
336 	const char *phy_name;
337 
338 	/* PHY base GRC address */
339 	u32 base_addr;
340 
341 	/* Relative address of indirect TBUS address register (bits 0..7) */
342 	u32 tbus_addr_lo_addr;
343 
344 	/* Relative address of indirect TBUS address register (bits 8..10) */
345 	u32 tbus_addr_hi_addr;
346 
347 	/* Relative address of indirect TBUS data register (bits 0..7) */
348 	u32 tbus_data_lo_addr;
349 
350 	/* Relative address of indirect TBUS data register (bits 8..11) */
351 	u32 tbus_data_hi_addr;
352 };
353 
354 /******************************** Constants **********************************/
355 
356 #define MAX_LCIDS			320
357 #define MAX_LTIDS			320
358 
359 #define NUM_IOR_SETS			2
360 #define IORS_PER_SET			176
361 #define IOR_SET_OFFSET(set_id)		((set_id) * 256)
362 
363 #define BYTES_IN_DWORD			sizeof(u32)
364 
365 /* Cyclic  right */
366 #define SHR(val, val_width, amount)	(((val) | ((val) << (val_width))) 					>> (amount)) & ((1 << (val_width)) - 1)
367 
368 /* In the macros below, size and offset are specified in bits */
369 #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
370 #define FIELD_BIT_OFFSET(type, field)	type##_##field##_##OFFSET
371 #define FIELD_BIT_SIZE(type, field)	type##_##field##_##SIZE
372 #define FIELD_DWORD_OFFSET(type, field)		(int)(FIELD_BIT_OFFSET(type, field) / 32)
373 #define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
374 #define FIELD_BIT_MASK(type, field)		(((1 << FIELD_BIT_SIZE(type, field)) - 1) 	<< FIELD_DWORD_SHIFT(type, field))
375 
376 #define SET_VAR_FIELD(var, type, field, val) 	var[FIELD_DWORD_OFFSET(type, field)] &= 		(~FIELD_BIT_MASK(type, field)); 	var[FIELD_DWORD_OFFSET(type, field)] |= 		(val) << FIELD_DWORD_SHIFT(type, field)
377 
378 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) 	for (i = 0; i < (arr_size); i++) 		ecore_wr(dev, ptt, addr, (arr)[i])
379 
380 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) 	for (i = 0; i < (arr_size); i++) 		(arr)[i] = ecore_rd(dev, ptt, addr)
381 
382 #define CHECK_ARR_SIZE(arr, size) 	OSAL_BUILD_BUG_ON(!(OSAL_ARRAY_SIZE(arr) == size))
383 
384 #ifndef DWORDS_TO_BYTES
385 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
386 #endif
387 #ifndef BYTES_TO_DWORDS
388 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
389 #endif
390 
391 /* extra lines include a signature line + optional latency events line */
392 #ifndef NUM_DBG_LINES
393 #define NUM_EXTRA_DBG_LINES(block_desc)		(1 + (block_desc->has_latency_events ? 1 : 0))
394 #define NUM_DBG_LINES(block_desc)		(block_desc->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
395 #endif
396 
397 #define USE_DMAE				true
398 #define PROTECT_WIDE_BUS		true
399 
400 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
401 #define RAM_LINES_TO_BYTES(lines)		DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
402 
403 #define REG_DUMP_LEN_SHIFT		24
404 #define MEM_DUMP_ENTRY_SIZE_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
405 
406 #define IDLE_CHK_RULE_SIZE_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
407 
408 #define IDLE_CHK_RESULT_HDR_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
409 
410 #define IDLE_CHK_RESULT_REG_HDR_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
411 
412 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
413 
414 /* The sizes and offsets below are specified in bits */
415 #define VFC_CAM_CMD_STRUCT_SIZE		64
416 #define VFC_CAM_CMD_ROW_OFFSET		48
417 #define VFC_CAM_CMD_ROW_SIZE		9
418 #define VFC_CAM_ADDR_STRUCT_SIZE	16
419 #define VFC_CAM_ADDR_OP_OFFSET		0
420 #define VFC_CAM_ADDR_OP_SIZE		4
421 #define VFC_CAM_RESP_STRUCT_SIZE	256
422 #define VFC_RAM_ADDR_STRUCT_SIZE	16
423 #define VFC_RAM_ADDR_OP_OFFSET		0
424 #define VFC_RAM_ADDR_OP_SIZE		2
425 #define VFC_RAM_ADDR_ROW_OFFSET		2
426 #define VFC_RAM_ADDR_ROW_SIZE		10
427 #define VFC_RAM_RESP_STRUCT_SIZE	256
428 
429 #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
430 #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
431 #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
432 #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
433 #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
434 #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
435 
436 #define NUM_VFC_RAM_TYPES		4
437 
438 #define VFC_CAM_NUM_ROWS		512
439 
440 #define VFC_OPCODE_CAM_RD		14
441 #define VFC_OPCODE_RAM_RD		0
442 
443 #define NUM_RSS_MEM_TYPES		5
444 
445 #define NUM_BIG_RAM_TYPES		3
446 
447 #define NUM_PHY_TBUS_ADDRESSES		2048
448 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
449 
450 #define SEM_FAST_MODE23_SRC_ENABLE_VAL	0x0
451 #define SEM_FAST_MODE23_SRC_DISABLE_VAL	0x7
452 #define SEM_FAST_MODE4_SRC_ENABLE_VAL	0x0
453 #define SEM_FAST_MODE4_SRC_DISABLE_VAL	0x3
454 #define SEM_FAST_MODE6_SRC_ENABLE_VAL	0x10
455 #define SEM_FAST_MODE6_SRC_DISABLE_VAL	0x3f
456 
457 #define SEM_SLOW_MODE1_DATA_ENABLE	0x1
458 
459 #define VALUES_PER_CYCLE		4
460 #define MAX_CYCLE_VALUES_MASK		((1 << VALUES_PER_CYCLE) - 1)
461 
462 #define MAX_DWORDS_PER_CYCLE		8
463 
464 #define HW_ID_BITS			3
465 
466 #define NUM_CALENDAR_SLOTS		16
467 
468 #define MAX_TRIGGER_STATES		3
469 #define TRIGGER_SETS_PER_STATE		2
470 #define MAX_CONSTRAINTS			4
471 
472 #define SEM_FILTER_CID_EN_MASK		0x00b
473 #define SEM_FILTER_EID_MASK_EN_MASK	0x013
474 #define SEM_FILTER_EID_RANGE_EN_MASK	0x113
475 
476 #define CHUNK_SIZE_IN_DWORDS		64
477 #define CHUNK_SIZE_IN_BYTES		DWORDS_TO_BYTES(CHUNK_SIZE_IN_DWORDS)
478 
479 #define INT_BUF_NUM_OF_LINES		192
480 #define INT_BUF_LINE_SIZE_IN_DWORDS	16
481 #define INT_BUF_SIZE_IN_DWORDS			(INT_BUF_NUM_OF_LINES * INT_BUF_LINE_SIZE_IN_DWORDS)
482 #define INT_BUF_SIZE_IN_CHUNKS			(INT_BUF_SIZE_IN_DWORDS / CHUNK_SIZE_IN_DWORDS)
483 
484 #define PCI_BUF_LINE_SIZE_IN_DWORDS	8
485 #define PCI_BUF_LINE_SIZE_IN_BYTES		DWORDS_TO_BYTES(PCI_BUF_LINE_SIZE_IN_DWORDS)
486 
487 #define TARGET_EN_MASK_PCI		0x3
488 #define TARGET_EN_MASK_NIG		0x4
489 
490 #define PCI_REQ_CREDIT			1
491 #define PCI_PHYS_ADDR_TYPE		0
492 
493 #define OPAQUE_FID(pci_func)		((pci_func << 4) | 0xff00)
494 
495 #define RESET_REG_UNRESET_OFFSET	4
496 
497 #define PCI_PKT_SIZE_IN_CHUNKS		1
498 #define PCI_PKT_SIZE_IN_BYTES			(PCI_PKT_SIZE_IN_CHUNKS * CHUNK_SIZE_IN_BYTES)
499 
500 #define NIG_PKT_SIZE_IN_CHUNKS		4
501 
502 #define FLUSH_DELAY_MS			500
503 #define STALL_DELAY_MS			500
504 
505 #define SRC_MAC_ADDR_LO16		0x0a0b
506 #define SRC_MAC_ADDR_HI32		0x0c0d0e0f
507 #define ETH_TYPE			0x1000
508 
509 #define STATIC_DEBUG_LINE_DWORDS	9
510 
511 #define NUM_COMMON_GLOBAL_PARAMS	8
512 
513 #define FW_IMG_KUKU			0
514 #define FW_IMG_MAIN			1
515 #define FW_IMG_L2B			2
516 
517 #ifndef REG_FIFO_ELEMENT_DWORDS
518 #define REG_FIFO_ELEMENT_DWORDS		2
519 #endif
520 #define REG_FIFO_DEPTH_ELEMENTS		32
521 #define REG_FIFO_DEPTH_DWORDS			(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
522 
523 #ifndef IGU_FIFO_ELEMENT_DWORDS
524 #define IGU_FIFO_ELEMENT_DWORDS		4
525 #endif
526 #define IGU_FIFO_DEPTH_ELEMENTS		64
527 #define IGU_FIFO_DEPTH_DWORDS			(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
528 
529 #define SEMI_SYNC_FIFO_POLLING_DELAY_MS	5
530 #define SEMI_SYNC_FIFO_POLLING_COUNT	20
531 
532 #ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
533 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
534 #endif
535 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
536 #define PROTECTION_OVERRIDE_DEPTH_DWORDS   	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS 	* PROTECTION_OVERRIDE_ELEMENT_DWORDS)
537 
538 #define MCP_SPAD_TRACE_OFFSIZE_ADDR		(MCP_REG_SCRATCH + 	OFFSETOF(struct static_init, sections[SPAD_SECTION_TRACE]))
539 
540 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
541 #define EMPTY_FW_IMAGE_STR		"???????????????"
542 
543 
544 /***************************** Constant Arrays *******************************/
545 
546 struct dbg_array {
547 	const u32 *ptr;
548 	u32 size_in_dwords;
549 };
550 
551 /* Debug arrays */
552 #ifdef USE_DBG_BIN_FILE
553 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { { OSAL_NULL } };
554 #else
555 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = {
556 
557 	/* BIN_BUF_DBG_MODE_TREE */
558 	{ (const u32 *)dbg_modes_tree_buf, OSAL_ARRAY_SIZE(dbg_modes_tree_buf)},
559 
560 	/* BIN_BUF_DBG_DUMP_REG */
561 	{ dump_reg, OSAL_ARRAY_SIZE(dump_reg) },
562 
563 	/* BIN_BUF_DBG_DUMP_MEM */
564 	{ dump_mem, OSAL_ARRAY_SIZE(dump_mem) },
565 
566 	/* BIN_BUF_DBG_IDLE_CHK_REGS */
567 	{ idle_chk_regs, OSAL_ARRAY_SIZE(idle_chk_regs) },
568 
569 	/* BIN_BUF_DBG_IDLE_CHK_IMMS */
570 	{ idle_chk_imms, OSAL_ARRAY_SIZE(idle_chk_imms) },
571 
572 	/* BIN_BUF_DBG_IDLE_CHK_RULES */
573 	{ idle_chk_rules, OSAL_ARRAY_SIZE(idle_chk_rules) },
574 
575 	/* BIN_BUF_DBG_IDLE_CHK_PARSING_DATA */
576 	{ OSAL_NULL, 0 },
577 
578 	/* BIN_BUF_DBG_ATTN_BLOCKS */
579 	{ attn_block, OSAL_ARRAY_SIZE(attn_block) },
580 
581 	/* BIN_BUF_DBG_ATTN_REGSS */
582 	{ attn_reg, OSAL_ARRAY_SIZE(attn_reg) },
583 
584 	/* BIN_BUF_DBG_ATTN_INDEXES */
585 	{ OSAL_NULL, 0 },
586 
587 	/* BIN_BUF_DBG_ATTN_NAME_OFFSETS */
588 	{ OSAL_NULL, 0 },
589 
590 	/* BIN_BUF_DBG_BUS_BLOCKS */
591 	{ dbg_bus_blocks, OSAL_ARRAY_SIZE(dbg_bus_blocks) },
592 
593 	/* BIN_BUF_DBG_BUS_LINES */
594 	{ dbg_bus_lines, OSAL_ARRAY_SIZE(dbg_bus_lines) },
595 
596 	/* BIN_BUF_DBG_BUS_BLOCKS_USER_DATA */
597 	{ OSAL_NULL, 0 },
598 
599 	/* BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS */
600 	{ OSAL_NULL, 0 },
601 
602 	/* BIN_BUF_DBG_PARSING_STRINGS */
603 	{ OSAL_NULL, 0 }
604 };
605 #endif
606 
607 /* Chip constant definitions array */
608 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
609 	{ "bb",
610 
611 		/* ASIC */
612 		{ { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
613 
614 		/* EMUL_FULL */
615 		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
616 
617 		/* EMUL_REDUCED */
618 		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
619 
620 		/* FPGA */
621 		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB } } },
622 
623 	{ "ah",
624 
625 		/* ASIC */
626 		{ { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
627 
628 		/* EMUL_FULL */
629 		{ MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
630 
631 		/* EMUL_REDUCED */
632 		{ MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
633 
634 		/* FPGA */
635 		{ MAX_NUM_PORTS_K2, 8, MAX_NUM_VFS_K2 } } },
636 
637 	{ "e5",
638 
639 		/* ASIC */
640 		{ { MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },
641 
642 		/* EMUL_FULL */
643 		{ MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },
644 
645 		/* EMUL_REDUCED */
646 		{ MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },
647 
648 		/* FPGA */
649 		{ MAX_NUM_PORTS_E5, 8, MAX_NUM_VFS_E5 } } }
650 };
651 
652 /* Storm constant definitions array */
653 static struct storm_defs s_storm_defs[] = {
654 
655 	/* Tstorm */
656 	{	'T', BLOCK_TSEM,
657 		{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT }, true,
658 		TSEM_REG_FAST_MEMORY,
659 		TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
660 		TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
661 		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
662 		TCM_REG_CTX_RBC_ACCS,
663 		4, TCM_REG_AGG_CON_CTX,
664 		16, TCM_REG_SM_CON_CTX,
665 		2, TCM_REG_AGG_TASK_CTX,
666 		4, TCM_REG_SM_TASK_CTX },
667 
668 	/* Mstorm */
669 	{	'M', BLOCK_MSEM,
670 		{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM }, false,
671 		MSEM_REG_FAST_MEMORY,
672 		MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
673 		MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
674 		MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
675 		MCM_REG_CTX_RBC_ACCS,
676 		1, MCM_REG_AGG_CON_CTX,
677 		10, MCM_REG_SM_CON_CTX,
678 		2, MCM_REG_AGG_TASK_CTX,
679 		7, MCM_REG_SM_TASK_CTX },
680 
681 	/* Ustorm */
682 	{	'U', BLOCK_USEM,
683 		{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU }, false,
684 		USEM_REG_FAST_MEMORY,
685 		USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
686 		USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
687 		USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
688 		UCM_REG_CTX_RBC_ACCS,
689 		2, UCM_REG_AGG_CON_CTX,
690 		13, UCM_REG_SM_CON_CTX,
691 		3, UCM_REG_AGG_TASK_CTX,
692 		3, UCM_REG_SM_TASK_CTX },
693 
694 	/* Xstorm */
695 	{	'X', BLOCK_XSEM,
696 		{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX }, false,
697 		XSEM_REG_FAST_MEMORY,
698 		XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
699 		XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
700 		XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
701 		XCM_REG_CTX_RBC_ACCS,
702 		9, XCM_REG_AGG_CON_CTX,
703 		15, XCM_REG_SM_CON_CTX,
704 		0, 0,
705 		0, 0 },
706 
707 	/* Ystorm */
708 	{	'Y', BLOCK_YSEM,
709 		{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY }, false,
710 		YSEM_REG_FAST_MEMORY,
711 		YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
712 		YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
713 		YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
714 		YCM_REG_CTX_RBC_ACCS,
715 		2, YCM_REG_AGG_CON_CTX,
716 		3, YCM_REG_SM_CON_CTX,
717 		2, YCM_REG_AGG_TASK_CTX,
718 		12, YCM_REG_SM_TASK_CTX },
719 
720 	/* Pstorm */
721 	{	'P', BLOCK_PSEM,
722 		{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS }, true,
723 		PSEM_REG_FAST_MEMORY,
724 		PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
725 		PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
726 		PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
727 		PCM_REG_CTX_RBC_ACCS,
728 		0, 0,
729 		10, PCM_REG_SM_CON_CTX,
730 		0, 0,
731 		0, 0 }
732 };
733 
734 /* Block definitions array */
735 
736 static struct block_defs block_grc_defs = {
737 	"grc", { true, true, true }, false, 0,
738 	{ DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
739 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
740 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
741 	GRC_REG_DBG_FORCE_FRAME,
742 	true, false, DBG_RESET_REG_MISC_PL_UA, 1 };
743 
744 static struct block_defs block_miscs_defs = {
745 	"miscs", { true, true, true }, false, 0,
746 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
747 	0, 0, 0, 0, 0,
748 	false, false, MAX_DBG_RESET_REGS, 0 };
749 
750 static struct block_defs block_misc_defs = {
751 	"misc", { true, true, true }, false, 0,
752 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
753 	0, 0, 0, 0, 0,
754 	false, false, MAX_DBG_RESET_REGS, 0 };
755 
756 static struct block_defs block_dbu_defs = {
757 	"dbu", { true, true, true }, false, 0,
758 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
759 	0, 0, 0, 0, 0,
760 	false, false, MAX_DBG_RESET_REGS, 0 };
761 
762 static struct block_defs block_pglue_b_defs = {
763 	"pglue_b", { true, true, true }, false, 0,
764 	{ DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
765 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
766 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
767 	PGLUE_B_REG_DBG_FORCE_FRAME,
768 	true, false, DBG_RESET_REG_MISCS_PL_HV, 1 };
769 
770 static struct block_defs block_cnig_defs = {
771 	"cnig", { true, true, true }, false, 0,
772 	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
773 	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
774 	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
775 	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
776 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0 };
777 
778 static struct block_defs block_cpmu_defs = {
779 	"cpmu", { true, true, true }, false, 0,
780 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
781 	0, 0, 0, 0, 0,
782 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8 };
783 
784 static struct block_defs block_ncsi_defs = {
785 	"ncsi", { true, true, true }, false, 0,
786 	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
787 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
788 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
789 	NCSI_REG_DBG_FORCE_FRAME,
790 	true, false, DBG_RESET_REG_MISCS_PL_HV, 5 };
791 
792 static struct block_defs block_opte_defs = {
793 	"opte", { true, true, false }, false, 0,
794 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
795 	0, 0, 0, 0, 0,
796 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4 };
797 
798 static struct block_defs block_bmb_defs = {
799 	"bmb", { true, true, true }, false, 0,
800 	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
801 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
802 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
803 	BMB_REG_DBG_FORCE_FRAME,
804 	true, false, DBG_RESET_REG_MISCS_PL_UA, 7 };
805 
806 static struct block_defs block_pcie_defs = {
807 	"pcie", { true, true, true }, false, 0,
808 	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
809 	PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
810 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
811 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
812 	false, false, MAX_DBG_RESET_REGS, 0 };
813 
814 static struct block_defs block_mcp_defs = {
815 	"mcp", { true, true, true }, false, 0,
816 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
817 	0, 0, 0, 0, 0,
818 	false, false, MAX_DBG_RESET_REGS, 0 };
819 
820 static struct block_defs block_mcp2_defs = {
821 	"mcp2", { true, true, true }, false, 0,
822 	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
823 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
824 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
825 	MCP2_REG_DBG_FORCE_FRAME,
826 	false, false, MAX_DBG_RESET_REGS, 0 };
827 
828 static struct block_defs block_pswhst_defs = {
829 	"pswhst", { true, true, true }, false, 0,
830 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
831 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
832 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
833 	PSWHST_REG_DBG_FORCE_FRAME,
834 	true, false, DBG_RESET_REG_MISC_PL_HV, 0 };
835 
836 static struct block_defs block_pswhst2_defs = {
837 	"pswhst2", { true, true, true }, false, 0,
838 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
839 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
840 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
841 	PSWHST2_REG_DBG_FORCE_FRAME,
842 	true, false, DBG_RESET_REG_MISC_PL_HV, 0 };
843 
844 static struct block_defs block_pswrd_defs = {
845 	"pswrd", { true, true, true }, false, 0,
846 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
847 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
848 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
849 	PSWRD_REG_DBG_FORCE_FRAME,
850 	true, false, DBG_RESET_REG_MISC_PL_HV, 2 };
851 
852 static struct block_defs block_pswrd2_defs = {
853 	"pswrd2", { true, true, true }, false, 0,
854 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
855 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
856 	PSWRD2_REG_DBG_SHIFT,	PSWRD2_REG_DBG_FORCE_VALID,
857 	PSWRD2_REG_DBG_FORCE_FRAME,
858 	true, false, DBG_RESET_REG_MISC_PL_HV, 2 };
859 
860 static struct block_defs block_pswwr_defs = {
861 	"pswwr", { true, true, true }, false, 0,
862 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
863 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
864 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
865 	PSWWR_REG_DBG_FORCE_FRAME,
866 	true, false, DBG_RESET_REG_MISC_PL_HV, 3 };
867 
868 static struct block_defs block_pswwr2_defs = {
869 	"pswwr2", { true, true, true }, false, 0,
870 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
871 	0, 0, 0, 0, 0,
872 	true, false, DBG_RESET_REG_MISC_PL_HV, 3 };
873 
874 static struct block_defs block_pswrq_defs = {
875 	"pswrq", { true, true, true }, false, 0,
876 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
877 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
878 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
879 	PSWRQ_REG_DBG_FORCE_FRAME,
880 	true, false, DBG_RESET_REG_MISC_PL_HV, 1 };
881 
882 static struct block_defs block_pswrq2_defs = {
883 	"pswrq2", { true, true, true }, false, 0,
884 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
885 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
886 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
887 	PSWRQ2_REG_DBG_FORCE_FRAME,
888 	true, false, DBG_RESET_REG_MISC_PL_HV, 1 };
889 
890 static struct block_defs block_pglcs_defs =	{
891 	"pglcs", { true, true, true }, false, 0,
892 	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
893 	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
894 	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
895 	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
896 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2 };
897 
898 static struct block_defs block_ptu_defs ={
899 	"ptu", { true, true, true }, false, 0,
900 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
901 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
902 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
903 	PTU_REG_DBG_FORCE_FRAME,
904 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20 };
905 
906 static struct block_defs block_dmae_defs = {
907 	"dmae", { true, true, true }, false, 0,
908 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
909 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
910 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
911 	DMAE_REG_DBG_FORCE_FRAME,
912 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28 };
913 
914 static struct block_defs block_tcm_defs = {
915 	"tcm", { true, true, true }, true, DBG_TSTORM_ID,
916 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
917 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
918 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
919 	TCM_REG_DBG_FORCE_FRAME,
920 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5 };
921 
922 static struct block_defs block_mcm_defs = {
923 	"mcm", { true, true, true }, true, DBG_MSTORM_ID,
924 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
925 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
926 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
927 	MCM_REG_DBG_FORCE_FRAME,
928 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3 };
929 
930 static struct block_defs block_ucm_defs = {
931 	"ucm", { true, true, true }, true, DBG_USTORM_ID,
932 	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
933 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
934 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
935 	UCM_REG_DBG_FORCE_FRAME,
936 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8 };
937 
938 static struct block_defs block_xcm_defs = {
939 	"xcm", { true, true, true }, true, DBG_XSTORM_ID,
940 	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
941 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
942 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
943 	XCM_REG_DBG_FORCE_FRAME,
944 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19 };
945 
946 static struct block_defs block_ycm_defs = {
947 	"ycm", { true, true, true }, true, DBG_YSTORM_ID,
948 	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
949 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
950 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
951 	YCM_REG_DBG_FORCE_FRAME,
952 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5 };
953 
954 static struct block_defs block_pcm_defs = {
955 	"pcm", { true, true, true }, true, DBG_PSTORM_ID,
956 	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
957 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
958 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
959 	PCM_REG_DBG_FORCE_FRAME,
960 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4 };
961 
962 static struct block_defs block_qm_defs = {
963 	"qm", { true, true, true }, false, 0,
964 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ },
965 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
966 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
967 	QM_REG_DBG_FORCE_FRAME,
968 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16 };
969 
970 static struct block_defs block_tm_defs = {
971 	"tm", { true, true, true }, false, 0,
972 	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
973 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
974 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
975 	TM_REG_DBG_FORCE_FRAME,
976 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17 };
977 
978 static struct block_defs block_dorq_defs = {
979 	"dorq", { true, true, true }, false, 0,
980 	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
981 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
982 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
983 	DORQ_REG_DBG_FORCE_FRAME,
984 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18 };
985 
986 static struct block_defs block_brb_defs = {
987 	"brb", { true, true, true }, false, 0,
988 	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
989 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
990 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
991 	BRB_REG_DBG_FORCE_FRAME,
992 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0 };
993 
994 static struct block_defs block_src_defs = {
995 	"src", { true, true, true }, false, 0,
996 	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
997 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
998 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
999 	SRC_REG_DBG_FORCE_FRAME,
1000 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2 };
1001 
1002 static struct block_defs block_prs_defs = {
1003 	"prs", { true, true, true }, false, 0,
1004 	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
1005 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
1006 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
1007 	PRS_REG_DBG_FORCE_FRAME,
1008 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1 };
1009 
1010 static struct block_defs block_tsdm_defs = {
1011 	"tsdm", { true, true, true }, true, DBG_TSTORM_ID,
1012 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
1013 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
1014 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
1015 	TSDM_REG_DBG_FORCE_FRAME,
1016 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3 };
1017 
1018 static struct block_defs block_msdm_defs = {
1019 	"msdm", { true, true, true }, true, DBG_MSTORM_ID,
1020 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1021 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
1022 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
1023 	MSDM_REG_DBG_FORCE_FRAME,
1024 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6 };
1025 
1026 static struct block_defs block_usdm_defs = {
1027 	"usdm", { true, true, true }, true, DBG_USTORM_ID,
1028 	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
1029 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
1030 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
1031 	USDM_REG_DBG_FORCE_FRAME,
1032 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
1033 	};
1034 static struct block_defs block_xsdm_defs = {
1035 	"xsdm", { true, true, true }, true, DBG_XSTORM_ID,
1036 	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
1037 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
1038 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
1039 	XSDM_REG_DBG_FORCE_FRAME,
1040 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20 };
1041 
1042 static struct block_defs block_ysdm_defs = {
1043 	"ysdm", { true, true, true }, true, DBG_YSTORM_ID,
1044 	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
1045 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
1046 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
1047 	YSDM_REG_DBG_FORCE_FRAME,
1048 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8 };
1049 
1050 static struct block_defs block_psdm_defs = {
1051 	"psdm", { true, true, true }, true, DBG_PSTORM_ID,
1052 	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
1053 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
1054 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
1055 	PSDM_REG_DBG_FORCE_FRAME,
1056 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7 };
1057 
1058 static struct block_defs block_tsem_defs = {
1059 	"tsem", { true, true, true }, true, DBG_TSTORM_ID,
1060 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
1061 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
1062 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
1063 	TSEM_REG_DBG_FORCE_FRAME,
1064 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4 };
1065 
1066 static struct block_defs block_msem_defs = {
1067 	"msem", { true, true, true }, true, DBG_MSTORM_ID,
1068 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1069 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
1070 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
1071 	MSEM_REG_DBG_FORCE_FRAME,
1072 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9 };
1073 
1074 static struct block_defs block_usem_defs = {
1075 	"usem", { true, true, true }, true, DBG_USTORM_ID,
1076 	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
1077 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
1078 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
1079 	USEM_REG_DBG_FORCE_FRAME,
1080 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9 };
1081 
1082 static struct block_defs block_xsem_defs = {
1083 	"xsem", { true, true, true }, true, DBG_XSTORM_ID,
1084 	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
1085 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1086 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1087 	XSEM_REG_DBG_FORCE_FRAME,
1088 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21 };
1089 
1090 static struct block_defs block_ysem_defs = {
1091 	"ysem", { true, true, true }, true, DBG_YSTORM_ID,
1092 	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
1093 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1094 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1095 	YSEM_REG_DBG_FORCE_FRAME,
1096 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11 };
1097 
1098 static struct block_defs block_psem_defs = {
1099 	"psem", { true, true, true }, true, DBG_PSTORM_ID,
1100 	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
1101 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1102 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1103 	PSEM_REG_DBG_FORCE_FRAME,
1104 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10 };
1105 
1106 static struct block_defs block_rss_defs = {
1107 	"rss", { true, true, true }, false, 0,
1108 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
1109 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1110 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1111 	RSS_REG_DBG_FORCE_FRAME,
1112 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18 };
1113 
1114 static struct block_defs block_tmld_defs = {
1115 	"tmld", { true, true, true }, false, 0,
1116 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1117 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1118 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1119 	TMLD_REG_DBG_FORCE_FRAME,
1120 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13 };
1121 
1122 static struct block_defs block_muld_defs = {
1123 	"muld", { true, true, true }, false, 0,
1124 	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
1125 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1126 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1127 	MULD_REG_DBG_FORCE_FRAME,
1128 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14 };
1129 
1130 static struct block_defs block_yuld_defs = {
1131 	"yuld", { true, true, false }, false, 0,
1132 	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, MAX_DBG_BUS_CLIENTS },
1133 	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1134 	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1135 	YULD_REG_DBG_FORCE_FRAME_BB_K2,
1136 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15 };
1137 
1138 static struct block_defs block_xyld_defs = {
1139 	"xyld", { true, true, true }, false, 0,
1140 	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
1141 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1142 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1143 	XYLD_REG_DBG_FORCE_FRAME,
1144 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12 };
1145 
1146 static struct block_defs block_ptld_defs = {
1147 	"ptld", { false, false, true }, false, 0,
1148 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT },
1149 	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1150 	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1151 	PTLD_REG_DBG_FORCE_FRAME_E5,
1152 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 28 };
1153 
1154 static struct block_defs block_ypld_defs = {
1155 	"ypld", { false, false, true }, false, 0,
1156 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS },
1157 	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1158 	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1159 	YPLD_REG_DBG_FORCE_FRAME_E5,
1160 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 27 };
1161 
1162 static struct block_defs block_prm_defs = {
1163 	"prm", { true, true, true }, false, 0,
1164 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1165 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1166 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1167 	PRM_REG_DBG_FORCE_FRAME,
1168 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21 };
1169 
1170 static struct block_defs block_pbf_pb1_defs = {
1171 	"pbf_pb1", { true, true, true }, false, 0,
1172 	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1173 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1174 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1175 	PBF_PB1_REG_DBG_FORCE_FRAME,
1176 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 11 };
1177 
1178 static struct block_defs block_pbf_pb2_defs = {
1179 	"pbf_pb2", { true, true, true }, false, 0,
1180 	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1181 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1182 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1183 	PBF_PB2_REG_DBG_FORCE_FRAME,
1184 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 12 };
1185 
1186 static struct block_defs block_rpb_defs = {
1187 	"rpb", { true, true, true }, false, 0,
1188 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1189 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1190 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1191 	RPB_REG_DBG_FORCE_FRAME,
1192 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13 };
1193 
1194 static struct block_defs block_btb_defs = {
1195 	"btb", { true, true, true }, false, 0,
1196 	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1197 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1198 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1199 	BTB_REG_DBG_FORCE_FRAME,
1200 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10 };
1201 
1202 static struct block_defs block_pbf_defs = {
1203 	"pbf", { true, true, true }, false, 0,
1204 	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1205 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1206 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1207 	PBF_REG_DBG_FORCE_FRAME,
1208 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15 };
1209 
1210 static struct block_defs block_rdif_defs = {
1211 	"rdif", { true, true, true }, false, 0,
1212 	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1213 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1214 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1215 	RDIF_REG_DBG_FORCE_FRAME,
1216 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16 };
1217 
1218 static struct block_defs block_tdif_defs = {
1219 	"tdif", { true, true, true }, false, 0,
1220 	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
1221 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1222 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1223 	TDIF_REG_DBG_FORCE_FRAME,
1224 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17 };
1225 
1226 static struct block_defs block_cdu_defs = {
1227 	"cdu", { true, true, true }, false, 0,
1228 	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1229 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1230 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1231 	CDU_REG_DBG_FORCE_FRAME,
1232 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23 };
1233 
1234 static struct block_defs block_ccfc_defs = {
1235 	"ccfc", { true, true, true }, false, 0,
1236 	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1237 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1238 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1239 	CCFC_REG_DBG_FORCE_FRAME,
1240 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24 };
1241 
1242 static struct block_defs block_tcfc_defs = {
1243 	"tcfc", { true, true, true }, false, 0,
1244 	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1245 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1246 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1247 	TCFC_REG_DBG_FORCE_FRAME,
1248 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25 };
1249 
1250 static struct block_defs block_igu_defs = {
1251 	"igu", { true, true, true }, false, 0,
1252 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
1253 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1254 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1255 	IGU_REG_DBG_FORCE_FRAME,
1256 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27 };
1257 
1258 static struct block_defs block_cau_defs = {
1259 	"cau", { true, true, true }, false, 0,
1260 	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
1261 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1262 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1263 	CAU_REG_DBG_FORCE_FRAME,
1264 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19 };
1265 
1266 /* TODO: add debug bus parameters when E5 RGFS RF is added */
1267 static struct block_defs block_rgfs_defs = {
1268 	"rgfs", { false, false, true }, false, 0,
1269 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1270 	0, 0, 0, 0, 0,
1271 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29 };
1272 
1273 static struct block_defs block_rgsrc_defs = {
1274 	"rgsrc", { false, false, true }, false, 0,
1275 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
1276 	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1277 	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1278 	RGSRC_REG_DBG_FORCE_FRAME_E5,
1279 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 30 };
1280 
1281 /* TODO: add debug bus parameters when E5 TGFS RF is added */
1282 static struct block_defs block_tgfs_defs = {
1283 	"tgfs", { false, false, true }, false, 0,
1284 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1285 	0, 0, 0, 0, 0,
1286 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30 };
1287 
1288 static struct block_defs block_tgsrc_defs = {
1289 	"tgsrc", { false, false, true }, false, 0,
1290 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV },
1291 	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1292 	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1293 	TGSRC_REG_DBG_FORCE_FRAME_E5,
1294 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 31 };
1295 
1296 static struct block_defs block_umac_defs = {
1297 	"umac", { true, true, true }, false, 0,
1298 	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
1299 	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1300 	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1301 	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1302 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6 };
1303 
1304 static struct block_defs block_xmac_defs = {
1305 	"xmac", { true, false, false }, false, 0,
1306 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1307 	0, 0, 0, 0, 0,
1308 	false, false, MAX_DBG_RESET_REGS, 0	};
1309 
1310 static struct block_defs block_dbg_defs = {
1311 	"dbg", { true, true, true }, false, 0,
1312 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1313 	0, 0, 0, 0, 0,
1314 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 };
1315 
1316 static struct block_defs block_nig_defs = {
1317 	"nig", { true, true, true }, false, 0,
1318 	{ DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
1319 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1320 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1321 	NIG_REG_DBG_FORCE_FRAME,
1322 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0 };
1323 
1324 static struct block_defs block_wol_defs = {
1325 	"wol", { false, true, true }, false, 0,
1326 	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
1327 	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1328 	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1329 	WOL_REG_DBG_FORCE_FRAME_K2_E5,
1330 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 };
1331 
1332 static struct block_defs block_bmbn_defs = {
1333 	"bmbn", { false, true, true }, false, 0,
1334 	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
1335 	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1336 	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1337 	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1338 	false, false, MAX_DBG_RESET_REGS, 0 };
1339 
1340 static struct block_defs block_ipc_defs = {
1341 	"ipc", { true, true, true }, false, 0,
1342 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1343 	0, 0, 0, 0, 0,
1344 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8 };
1345 
1346 static struct block_defs block_nwm_defs = {
1347 	"nwm", { false, true, true }, false, 0,
1348 	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
1349 	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1350 	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1351 	NWM_REG_DBG_FORCE_FRAME_K2_E5,
1352 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 };
1353 
1354 static struct block_defs block_nws_defs = {
1355 	"nws", { false, true, true }, false, 0,
1356 	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
1357 	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1358 	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1359 	NWS_REG_DBG_FORCE_FRAME_K2_E5,
1360 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12 };
1361 
1362 static struct block_defs block_ms_defs = {
1363 	"ms", { false, true, true }, false, 0,
1364 	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
1365 	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1366 	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1367 	MS_REG_DBG_FORCE_FRAME_K2_E5,
1368 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13 };
1369 
1370 static struct block_defs block_phy_pcie_defs = {
1371 	"phy_pcie", { false, true, true }, false, 0,
1372 	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
1373 	PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1374 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1375 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1376 	false, false, MAX_DBG_RESET_REGS, 0 };
1377 
1378 static struct block_defs block_led_defs = {
1379 	"led", { false, true, true }, false, 0,
1380 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1381 	0, 0, 0, 0, 0,
1382 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14 };
1383 
1384 static struct block_defs block_avs_wrap_defs = {
1385 	"avs_wrap", { false, true, false }, false, 0,
1386 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1387 	0, 0, 0, 0, 0,
1388 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11 };
1389 
1390 static struct block_defs block_pxpreqbus_defs = {
1391 	"pxpreqbus", { false, false, false }, false, 0,
1392 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1393 	0, 0, 0, 0, 0,
1394 	false, false, MAX_DBG_RESET_REGS, 0 };
1395 
1396 static struct block_defs block_misc_aeu_defs = {
1397 	"misc_aeu", { true, true, true }, false, 0,
1398 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1399 	0, 0, 0, 0, 0,
1400 	false, false, MAX_DBG_RESET_REGS, 0 };
1401 
1402 static struct block_defs block_bar0_map_defs = {
1403 	"bar0_map", { true, true, true }, false, 0,
1404 	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1405 	0, 0, 0, 0, 0,
1406 	false, false, MAX_DBG_RESET_REGS, 0 };
1407 
1408 
1409 static struct block_defs* s_block_defs[MAX_BLOCK_ID] = {
1410 	&block_grc_defs,
1411  	&block_miscs_defs,
1412  	&block_misc_defs,
1413  	&block_dbu_defs,
1414  	&block_pglue_b_defs,
1415  	&block_cnig_defs,
1416  	&block_cpmu_defs,
1417  	&block_ncsi_defs,
1418  	&block_opte_defs,
1419  	&block_bmb_defs,
1420  	&block_pcie_defs,
1421  	&block_mcp_defs,
1422  	&block_mcp2_defs,
1423  	&block_pswhst_defs,
1424  	&block_pswhst2_defs,
1425  	&block_pswrd_defs,
1426  	&block_pswrd2_defs,
1427  	&block_pswwr_defs,
1428  	&block_pswwr2_defs,
1429  	&block_pswrq_defs,
1430  	&block_pswrq2_defs,
1431  	&block_pglcs_defs,
1432  	&block_dmae_defs,
1433  	&block_ptu_defs,
1434  	&block_tcm_defs,
1435  	&block_mcm_defs,
1436  	&block_ucm_defs,
1437  	&block_xcm_defs,
1438  	&block_ycm_defs,
1439  	&block_pcm_defs,
1440  	&block_qm_defs,
1441  	&block_tm_defs,
1442  	&block_dorq_defs,
1443  	&block_brb_defs,
1444  	&block_src_defs,
1445  	&block_prs_defs,
1446  	&block_tsdm_defs,
1447  	&block_msdm_defs,
1448  	&block_usdm_defs,
1449  	&block_xsdm_defs,
1450  	&block_ysdm_defs,
1451  	&block_psdm_defs,
1452  	&block_tsem_defs,
1453  	&block_msem_defs,
1454  	&block_usem_defs,
1455  	&block_xsem_defs,
1456  	&block_ysem_defs,
1457  	&block_psem_defs,
1458  	&block_rss_defs,
1459  	&block_tmld_defs,
1460  	&block_muld_defs,
1461  	&block_yuld_defs,
1462  	&block_xyld_defs,
1463  	&block_ptld_defs,
1464  	&block_ypld_defs,
1465  	&block_prm_defs,
1466  	&block_pbf_pb1_defs,
1467  	&block_pbf_pb2_defs,
1468  	&block_rpb_defs,
1469  	&block_btb_defs,
1470  	&block_pbf_defs,
1471  	&block_rdif_defs,
1472  	&block_tdif_defs,
1473  	&block_cdu_defs,
1474  	&block_ccfc_defs,
1475  	&block_tcfc_defs,
1476  	&block_igu_defs,
1477  	&block_cau_defs,
1478  	&block_rgfs_defs,
1479  	&block_rgsrc_defs,
1480  	&block_tgfs_defs,
1481  	&block_tgsrc_defs,
1482  	&block_umac_defs,
1483  	&block_xmac_defs,
1484  	&block_dbg_defs,
1485  	&block_nig_defs,
1486  	&block_wol_defs,
1487  	&block_bmbn_defs,
1488  	&block_ipc_defs,
1489  	&block_nwm_defs,
1490  	&block_nws_defs,
1491  	&block_ms_defs,
1492  	&block_phy_pcie_defs,
1493  	&block_led_defs,
1494  	&block_avs_wrap_defs,
1495  	&block_pxpreqbus_defs,
1496  	&block_misc_aeu_defs,
1497  	&block_bar0_map_defs,
1498 
1499 };
1500 
1501 
1502 /* Constraint operation types */
1503 static struct dbg_bus_constraint_op_defs s_constraint_op_defs[] = {
1504 
1505 	/* DBG_BUS_CONSTRAINT_OP_EQ */
1506 	{ 0, false },
1507 
1508 	/* DBG_BUS_CONSTRAINT_OP_NE */
1509 	{ 5, false },
1510 
1511 	/* DBG_BUS_CONSTRAINT_OP_LT */
1512 	{ 1, false },
1513 
1514 	/* DBG_BUS_CONSTRAINT_OP_LTC */
1515 	{ 1, true },
1516 
1517 	/* DBG_BUS_CONSTRAINT_OP_LE */
1518 	{ 2, false },
1519 
1520 	/* DBG_BUS_CONSTRAINT_OP_LEC */
1521 	{ 2, true },
1522 
1523 	/* DBG_BUS_CONSTRAINT_OP_GT */
1524 	{ 4, false },
1525 
1526 	/* DBG_BUS_CONSTRAINT_OP_GTC */
1527 	{ 4, true },
1528 
1529 	/* DBG_BUS_CONSTRAINT_OP_GE */
1530 	{ 3, false },
1531 
1532 	/* DBG_BUS_CONSTRAINT_OP_GEC */
1533 	{ 3, true }
1534 };
1535 
1536 static const char* s_dbg_target_names[] = {
1537 
1538 	/* DBG_BUS_TARGET_ID_INT_BUF */
1539 	"int-buf",
1540 
1541 	/* DBG_BUS_TARGET_ID_NIG */
1542 	"nw",
1543 
1544 	/* DBG_BUS_TARGET_ID_PCI */
1545 	"pci-buf"
1546 };
1547 
1548 static struct storm_mode_defs s_storm_mode_defs[] = {
1549 
1550 	/* DBG_BUS_STORM_MODE_PRINTF */
1551 	{ "printf", true, 0 },
1552 
1553 	/* DBG_BUS_STORM_MODE_PRAM_ADDR */
1554 	{ "pram_addr", true, 1 },
1555 
1556 	/* DBG_BUS_STORM_MODE_DRA_RW */
1557 	{ "dra_rw", true, 2 },
1558 
1559 	/* DBG_BUS_STORM_MODE_DRA_W */
1560 	{ "dra_w", true, 3 },
1561 
1562 	/* DBG_BUS_STORM_MODE_LD_ST_ADDR */
1563 	{ "ld_st_addr", true, 4 },
1564 
1565 	/* DBG_BUS_STORM_MODE_DRA_FSM */
1566 	{ "dra_fsm", true, 5 },
1567 
1568 	/* DBG_BUS_STORM_MODE_RH */
1569 	{ "rh", true, 6 },
1570 
1571 	/* DBG_BUS_STORM_MODE_FOC */
1572 	{ "foc", false, 1 },
1573 
1574 	/* DBG_BUS_STORM_MODE_EXT_STORE */
1575 	{ "ext_store", false, 3 }
1576 };
1577 
1578 static struct platform_defs s_platform_defs[] = {
1579 
1580 	/* PLATFORM_ASIC */
1581 	{ "asic", 1, 256, 32768 },
1582 
1583 	/* PLATFORM_EMUL_FULL */
1584 	{ "emul_full", 2000, 8, 4096 },
1585 
1586 	/* PLATFORM_EMUL_REDUCED */
1587 	{ "emul_reduced", 2000, 8, 4096 },
1588 
1589 	/* PLATFORM_FPGA */
1590 	{ "fpga", 200, 32, 8192 }
1591 };
1592 
1593 static struct grc_param_defs s_grc_param_defs[] = {
1594 
1595 	/* DBG_GRC_PARAM_DUMP_TSTORM */
1596 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1597 
1598 	/* DBG_GRC_PARAM_DUMP_MSTORM */
1599 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1600 
1601 	/* DBG_GRC_PARAM_DUMP_USTORM */
1602 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1603 
1604 	/* DBG_GRC_PARAM_DUMP_XSTORM */
1605 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1606 
1607 	/* DBG_GRC_PARAM_DUMP_YSTORM */
1608 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1609 
1610 	/* DBG_GRC_PARAM_DUMP_PSTORM */
1611 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1612 
1613 	/* DBG_GRC_PARAM_DUMP_REGS */
1614 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1615 
1616 	/* DBG_GRC_PARAM_DUMP_RAM */
1617 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1618 
1619 	/* DBG_GRC_PARAM_DUMP_PBUF */
1620 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1621 
1622 	/* DBG_GRC_PARAM_DUMP_IOR */
1623 	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },
1624 
1625 	/* DBG_GRC_PARAM_DUMP_VFC */
1626 	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },
1627 
1628 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1629 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1630 
1631 	/* DBG_GRC_PARAM_DUMP_ILT */
1632 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1633 
1634 	/* DBG_GRC_PARAM_DUMP_RSS */
1635 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1636 
1637 	/* DBG_GRC_PARAM_DUMP_CAU */
1638 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1639 
1640 	/* DBG_GRC_PARAM_DUMP_QM */
1641 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1642 
1643 	/* DBG_GRC_PARAM_DUMP_MCP */
1644 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1645 
1646 	/* DBG_GRC_PARAM_RESERVED */
1647 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1648 
1649 	/* DBG_GRC_PARAM_DUMP_CFC */
1650 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1651 
1652 	/* DBG_GRC_PARAM_DUMP_IGU */
1653 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1654 
1655 	/* DBG_GRC_PARAM_DUMP_BRB */
1656 	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },
1657 
1658 	/* DBG_GRC_PARAM_DUMP_BTB */
1659 	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },
1660 
1661 	/* DBG_GRC_PARAM_DUMP_BMB */
1662 	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },
1663 
1664 	/* DBG_GRC_PARAM_DUMP_NIG */
1665 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1666 
1667 	/* DBG_GRC_PARAM_DUMP_MULD */
1668 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1669 
1670 	/* DBG_GRC_PARAM_DUMP_PRS */
1671 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1672 
1673 	/* DBG_GRC_PARAM_DUMP_DMAE */
1674 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1675 
1676 	/* DBG_GRC_PARAM_DUMP_TM */
1677 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1678 
1679 	/* DBG_GRC_PARAM_DUMP_SDM */
1680 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1681 
1682 	/* DBG_GRC_PARAM_DUMP_DIF */
1683 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1684 
1685 	/* DBG_GRC_PARAM_DUMP_STATIC */
1686 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1687 
1688 	/* DBG_GRC_PARAM_UNSTALL */
1689 	{ { 0, 0, 0 }, 0, 1, false, 0, 0 },
1690 
1691 	/* DBG_GRC_PARAM_NUM_LCIDS */
1692 	{ { MAX_LCIDS, MAX_LCIDS, MAX_LCIDS }, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS },
1693 
1694 	/* DBG_GRC_PARAM_NUM_LTIDS */
1695 	{ { MAX_LTIDS, MAX_LTIDS, MAX_LTIDS }, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS },
1696 
1697 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1698 	{ { 0, 0, 0 }, 0, 1, true, 0, 0 },
1699 
1700 	/* DBG_GRC_PARAM_CRASH */
1701 	{ { 0, 0, 0 }, 0, 1, true, 0, 0 },
1702 
1703 	/* DBG_GRC_PARAM_PARITY_SAFE */
1704 	{ { 0, 0, 0 }, 0, 1, false, 1, 0 },
1705 
1706 	/* DBG_GRC_PARAM_DUMP_CM */
1707 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1708 
1709 	/* DBG_GRC_PARAM_DUMP_PHY */
1710 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1711 
1712 	/* DBG_GRC_PARAM_NO_MCP */
1713 	{ { 0, 0, 0 }, 0, 1, false, 0, 0 },
1714 
1715 	/* DBG_GRC_PARAM_NO_FW_VER */
1716 	{ { 0, 0, 0 }, 0, 1, false, 0, 0 }
1717 };
1718 
1719 static struct rss_mem_defs s_rss_mem_defs[] = {
1720 	{ "rss_mem_cid", "rss_cid", 0, 32,
1721 	{ 256, 320, 512 } },
1722 
1723 	{ "rss_mem_key_msb", "rss_key", 1024, 256,
1724 	{ 128, 208, 257 } },
1725 
1726 	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
1727 	{ 128, 208, 257 } },
1728 
1729 	{ "rss_mem_info", "rss_info", 3072, 16,
1730 	{ 128, 208, 256 } },
1731 
1732 	{ "rss_mem_ind", "rss_ind", 4096, 16,
1733 	{ 16384, 26624, 32768 } }
1734 };
1735 
1736 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1737 	{ "vfc_ram_tt1", "vfc_ram", 0, 512 },
1738 	{ "vfc_ram_mtt2", "vfc_ram", 512, 128 },
1739 	{ "vfc_ram_stt2", "vfc_ram", 640, 32 },
1740 	{ "vfc_ram_ro_vect", "vfc_ram", 672, 32 }
1741 };
1742 
1743 static struct big_ram_defs s_big_ram_defs[] = {
1744 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1745 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, MISC_REG_BLOCK_256B_EN, { 0, 0, 0 },
1746 	  { 153600, 180224, 282624 } },
1747 
1748 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1749 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, MISC_REG_BLOCK_256B_EN, { 0, 1, 1 },
1750 	  { 92160, 117760, 168960 } },
1751 
1752 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1753 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, MISCS_REG_BLOCK_256B_EN, { 0, 0, 0 },
1754 	  { 36864, 36864, 36864 } }
1755 };
1756 
1757 static struct reset_reg_defs s_reset_regs_defs[] = {
1758 
1759 	/* DBG_RESET_REG_MISCS_PL_UA */
1760 	{ MISCS_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },
1761 
1762 	/* DBG_RESET_REG_MISCS_PL_HV */
1763 	{ MISCS_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x400, 0x600 } },
1764 
1765 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1766 	{ MISCS_REG_RESET_PL_HV_2_K2_E5, { false, true, true }, { 0x0, 0x0, 0x0 } },
1767 
1768 	/* DBG_RESET_REG_MISC_PL_UA */
1769 	{ MISC_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },
1770 
1771 	/* DBG_RESET_REG_MISC_PL_HV */
1772 	{ MISC_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x0, 0x0 } },
1773 
1774 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1775 	{ MISC_REG_RESET_PL_PDA_VMAIN_1, { true, true, true }, { 0x4404040, 0x4404040, 0x404040 } },
1776 
1777 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1778 	{ MISC_REG_RESET_PL_PDA_VMAIN_2, { true, true, true }, { 0x7, 0x7c00007, 0x5c08007 } },
1779 
1780 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1781 	{ MISC_REG_RESET_PL_PDA_VAUX, { true, true, true }, { 0x2, 0x2, 0x2 } },
1782 };
1783 
1784 static struct phy_defs s_phy_defs[] = {
1785 	{ "nw_phy", NWS_REG_NWS_CMU_K2, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 },
1786 	{ "sgmii_phy", MS_REG_MS_CMU_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1787 	{ "pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1788 	{ "pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1789 };
1790 
1791 /* The order of indexes that should be applied to a PCI buffer line */
1792 static const u8 s_pci_buf_line_ind[PCI_BUF_LINE_SIZE_IN_DWORDS] = { 1, 0, 3, 2, 5, 4, 7, 6 };
1793 
1794 /******************************** Variables **********************************/
1795 
1796 /* The version of the calling app */
1797 static u32 s_app_ver;
1798 
1799 /**************************** Private Functions ******************************/
1800 
1801 static void ecore_static_asserts(void)
1802 {
1803 	CHECK_ARR_SIZE(s_dbg_arrays, MAX_BIN_DBG_BUFFER_TYPE);
1804 	CHECK_ARR_SIZE(s_big_ram_defs, NUM_BIG_RAM_TYPES);
1805 	CHECK_ARR_SIZE(s_vfc_ram_defs, NUM_VFC_RAM_TYPES);
1806 	CHECK_ARR_SIZE(s_rss_mem_defs, NUM_RSS_MEM_TYPES);
1807 	CHECK_ARR_SIZE(s_chip_defs, MAX_CHIP_IDS);
1808 	CHECK_ARR_SIZE(s_platform_defs, MAX_PLATFORM_IDS);
1809 	CHECK_ARR_SIZE(s_storm_defs, MAX_DBG_STORMS);
1810 	CHECK_ARR_SIZE(s_constraint_op_defs, MAX_DBG_BUS_CONSTRAINT_OPS);
1811 	CHECK_ARR_SIZE(s_dbg_target_names, MAX_DBG_BUS_TARGETS);
1812 	CHECK_ARR_SIZE(s_storm_mode_defs, MAX_DBG_BUS_STORM_MODES);
1813 	CHECK_ARR_SIZE(s_grc_param_defs, MAX_DBG_GRC_PARAMS);
1814 	CHECK_ARR_SIZE(s_reset_regs_defs, MAX_DBG_RESET_REGS);
1815 }
1816 
1817 /* Reads and returns a single dword from the specified unaligned buffer. */
1818 static u32 ecore_read_unaligned_dword(u8 *buf)
1819 {
1820 	u32 dword;
1821 
1822 	OSAL_MEMCPY((u8 *)&dword, buf, sizeof(dword));
1823 	return dword;
1824 }
1825 
1826 /* Returns the difference in bytes between the specified physical addresses.
1827  * Assumes that the first address is bigger then the second, and that the
1828  * difference is a 32-bit value.
1829  */
1830 static u32 ecore_phys_addr_diff(struct dbg_bus_mem_addr *a,
1831 								struct dbg_bus_mem_addr *b)
1832 {
1833 	return a->hi == b->hi ? a->lo - b->lo : b->lo - a->lo;
1834 }
1835 
1836 /* Sets the value of the specified GRC param */
1837 static void ecore_grc_set_param(struct ecore_hwfn *p_hwfn,
1838 				 enum dbg_grc_params grc_param,
1839 				 u32 val)
1840 {
1841 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1842 
1843 	dev_data->grc.param_val[grc_param] = val;
1844 }
1845 
1846 /* Returns the value of the specified GRC param */
1847 static u32 ecore_grc_get_param(struct ecore_hwfn *p_hwfn,
1848 							   enum dbg_grc_params grc_param)
1849 {
1850 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1851 
1852 	return dev_data->grc.param_val[grc_param];
1853 }
1854 
1855 /* Initializes the GRC parameters */
1856 static void ecore_dbg_grc_init_params(struct ecore_hwfn *p_hwfn)
1857 {
1858 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1859 
1860 	if (!dev_data->grc.params_initialized) {
1861 		ecore_dbg_grc_set_params_default(p_hwfn);
1862 		dev_data->grc.params_initialized = 1;
1863 	}
1864 }
1865 
1866 /* Initializes debug data for the specified device */
1867 static enum dbg_status ecore_dbg_dev_init(struct ecore_hwfn *p_hwfn,
1868 										  struct ecore_ptt *p_ptt)
1869 {
1870 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1871 
1872 	if (dev_data->initialized)
1873 		return DBG_STATUS_OK;
1874 
1875 	if (!s_app_ver)
1876 		return DBG_STATUS_APP_VERSION_NOT_SET;
1877 
1878 	if (ECORE_IS_E5(p_hwfn->p_dev)) {
1879 		dev_data->chip_id = CHIP_E5;
1880 		dev_data->mode_enable[MODE_E5] = 1;
1881 	}
1882 	else if (ECORE_IS_K2(p_hwfn->p_dev)) {
1883 		dev_data->chip_id = CHIP_K2;
1884 		dev_data->mode_enable[MODE_K2] = 1;
1885 	}
1886 	else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
1887 		dev_data->chip_id = CHIP_BB;
1888 		dev_data->mode_enable[MODE_BB] = 1;
1889 	}
1890 	else {
1891 		return DBG_STATUS_UNKNOWN_CHIP;
1892 	}
1893 
1894 #ifdef ASIC_ONLY
1895 	dev_data->platform_id = PLATFORM_ASIC;
1896 	dev_data->mode_enable[MODE_ASIC] = 1;
1897 #else
1898 	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
1899 		dev_data->platform_id = PLATFORM_ASIC;
1900 		dev_data->mode_enable[MODE_ASIC] = 1;
1901 	}
1902 	else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1903 		if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED) & 0x20000000) {
1904 			dev_data->platform_id = PLATFORM_EMUL_FULL;
1905 			dev_data->mode_enable[MODE_EMUL_FULL] = 1;
1906 		}
1907 		else {
1908 			dev_data->platform_id = PLATFORM_EMUL_REDUCED;
1909 			dev_data->mode_enable[MODE_EMUL_REDUCED] = 1;
1910 		}
1911 	}
1912 	else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1913 		dev_data->platform_id = PLATFORM_FPGA;
1914 		dev_data->mode_enable[MODE_FPGA] = 1;
1915 	}
1916 	else {
1917 		return DBG_STATUS_UNKNOWN_CHIP;
1918 	}
1919 #endif
1920 
1921 	/* Initializes the GRC parameters */
1922 	ecore_dbg_grc_init_params(p_hwfn);
1923 
1924 	dev_data->use_dmae = USE_DMAE;
1925 	dev_data->num_regs_read = 0;
1926 	dev_data->initialized = 1;
1927 
1928 	return DBG_STATUS_OK;
1929 }
1930 
1931 static struct dbg_bus_block* get_dbg_bus_block_desc(struct ecore_hwfn *p_hwfn,
1932 														  enum block_id block_id)
1933 {
1934 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1935 
1936 	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id * MAX_CHIP_IDS + dev_data->chip_id];
1937 }
1938 
1939 /* Returns OSAL_NULL for signature line, latency line and non-existing lines */
1940 static struct dbg_bus_line* get_dbg_bus_line_desc(struct ecore_hwfn *p_hwfn,
1941 														enum block_id block_id)
1942 {
1943 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1944 	struct dbg_bus_block_data *block_bus;
1945 	struct dbg_bus_block *block_desc;
1946 
1947 	block_bus = &dev_data->bus.blocks[block_id];
1948 	block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
1949 
1950 	if (!block_bus->line_num ||
1951 		(block_bus->line_num == 1 && block_desc->has_latency_events) ||
1952 		block_bus->line_num >= NUM_DBG_LINES(block_desc))
1953 		return OSAL_NULL;
1954 
1955 	return (struct dbg_bus_line *)&dbg_bus_lines[block_desc->lines_offset + block_bus->line_num - NUM_EXTRA_DBG_LINES(block_desc)];
1956 }
1957 
1958 /* Reads the FW info structure for the specified Storm from the chip,
1959  * and writes it to the specified fw_info pointer.
1960  */
1961 static void ecore_read_fw_info(struct ecore_hwfn *p_hwfn,
1962 							   struct ecore_ptt *p_ptt,
1963 							   u8 storm_id,
1964 							   struct fw_info *fw_info)
1965 {
1966 	struct storm_defs *storm = &s_storm_defs[storm_id];
1967 	struct fw_info_location fw_info_location;
1968 	u32 addr, i, *dest;
1969 
1970 	OSAL_MEMSET(&fw_info_location, 0, sizeof(fw_info_location));
1971 	OSAL_MEMSET(fw_info, 0, sizeof(*fw_info));
1972 
1973 	/* Read first the address that points to fw_info location.
1974 	 * The address is located in the last line of the Storm RAM.
1975 	 */
1976 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1977 		(ECORE_IS_E5(p_hwfn->p_dev) ?
1978 			DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_E5) :
1979 			DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2))
1980 		- sizeof(fw_info_location);
1981 
1982 	dest = (u32 *)&fw_info_location;
1983 
1984 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); i++, addr += BYTES_IN_DWORD)
1985 		dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1986 
1987 	/* Read FW version info from Storm RAM */
1988 	if (fw_info_location.size > 0 && fw_info_location.size <= sizeof(*fw_info)) {
1989 		addr = fw_info_location.grc_addr;
1990 		dest = (u32 *)fw_info;
1991 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); i++, addr += BYTES_IN_DWORD)
1992 			dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1993 	}
1994 }
1995 
1996 /* Dumps the specified string to the specified buffer.
1997  * Returns the dumped size in bytes.
1998  */
1999 static u32 ecore_dump_str(char *dump_buf,
2000 						  bool dump,
2001 						  const char *str)
2002 {
2003 	if (dump)
2004 		OSAL_STRCPY(dump_buf, str);
2005 
2006 	return (u32)OSAL_STRLEN(str) + 1;
2007 }
2008 
2009 /* Dumps zeros to align the specified buffer to dwords.
2010  * Returns the dumped size in bytes.
2011  */
2012 static u32 ecore_dump_align(char *dump_buf,
2013 							bool dump,
2014 							u32 byte_offset)
2015 {
2016 	u8 offset_in_dword, align_size;
2017 
2018 	offset_in_dword = (u8)(byte_offset & 0x3);
2019 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
2020 
2021 	if (dump && align_size)
2022 		OSAL_MEMSET(dump_buf, 0, align_size);
2023 
2024 	return align_size;
2025 }
2026 
2027 /* Writes the specified string param to the specified buffer.
2028  * Returns the dumped size in dwords.
2029  */
2030 static u32 ecore_dump_str_param(u32 *dump_buf,
2031 								bool dump,
2032 								const char *param_name,
2033 								const char *param_val)
2034 {
2035 	char *char_buf = (char *)dump_buf;
2036 	u32 offset = 0;
2037 
2038 	/* Dump param name */
2039 	offset += ecore_dump_str(char_buf + offset, dump, param_name);
2040 
2041 	/* Indicate a string param value */
2042 	if (dump)
2043 		*(char_buf + offset) = 1;
2044 	offset++;
2045 
2046 	/* Dump param value */
2047 	offset += ecore_dump_str(char_buf + offset, dump, param_val);
2048 
2049 	/* Align buffer to next dword */
2050 	offset += ecore_dump_align(char_buf + offset, dump, offset);
2051 
2052 	return BYTES_TO_DWORDS(offset);
2053 }
2054 
2055 /* Writes the specified numeric param to the specified buffer.
2056  * Returns the dumped size in dwords.
2057  */
2058 static u32 ecore_dump_num_param(u32 *dump_buf,
2059 								bool dump,
2060 								const char *param_name,
2061 								u32 param_val)
2062 {
2063 	char *char_buf = (char *)dump_buf;
2064 	u32 offset = 0;
2065 
2066 	/* Dump param name */
2067 	offset += ecore_dump_str(char_buf + offset, dump, param_name);
2068 
2069 	/* Indicate a numeric param value */
2070 	if (dump)
2071 		*(char_buf + offset) = 0;
2072 	offset++;
2073 
2074 	/* Align buffer to next dword */
2075 	offset += ecore_dump_align(char_buf + offset, dump, offset);
2076 
2077 	/* Dump param value (and change offset from bytes to dwords) */
2078 	offset = BYTES_TO_DWORDS(offset);
2079 	if (dump)
2080 		*(dump_buf + offset) = param_val;
2081 	offset++;
2082 
2083 	return offset;
2084 }
2085 
2086 /* Reads the FW version and writes it as a param to the specified buffer.
2087  * Returns the dumped size in dwords.
2088  */
2089 static u32 ecore_dump_fw_ver_param(struct ecore_hwfn *p_hwfn,
2090 								   struct ecore_ptt *p_ptt,
2091 								   u32 *dump_buf,
2092 								   bool dump)
2093 {
2094 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2095 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
2096 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
2097 	struct fw_info fw_info = { { 0 }, { 0 } };
2098 	u32 offset = 0;
2099 
2100 	if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2101 		/* Read FW image/version from PRAM in a non-reset SEMI */
2102 		bool found = false;
2103 		u8 storm_id;
2104 
2105 		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; storm_id++) {
2106 			struct storm_defs *storm = &s_storm_defs[storm_id];
2107 
2108 			/* Read FW version/image */
2109 			if (dev_data->block_in_reset[storm->block_id])
2110 				continue;
2111 
2112 			/* Read FW info for the current Storm */
2113 			ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
2114 
2115 			/* Create FW version/image strings */
2116 			if (OSAL_SNPRINTF(fw_ver_str, sizeof(fw_ver_str), "%d_%d_%d_%d", fw_info.ver.num.major, fw_info.ver.num.minor, fw_info.ver.num.rev, fw_info.ver.num.eng) < 0)
2117 				DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid FW version string\n");
2118 			switch (fw_info.ver.image_id) {
2119 			case FW_IMG_KUKU: OSAL_STRCPY(fw_img_str, "kuku"); break;
2120 			case FW_IMG_MAIN: OSAL_STRCPY(fw_img_str, "main"); break;
2121 			case FW_IMG_L2B: OSAL_STRCPY(fw_img_str, "l2b"); break;
2122 			default: OSAL_STRCPY(fw_img_str, "unknown"); break;
2123 			}
2124 
2125 			found = true;
2126 		}
2127 	}
2128 
2129 	/* Dump FW version, image and timestamp */
2130 	offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-version", fw_ver_str);
2131 	offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str);
2132 	offset += ecore_dump_num_param(dump_buf + offset, dump, "fw-timestamp", fw_info.ver.timestamp);
2133 
2134 	return offset;
2135 }
2136 
2137 /* Reads the MFW version and writes it as a param to the specified buffer.
2138  * Returns the dumped size in dwords.
2139  */
2140 static u32 ecore_dump_mfw_ver_param(struct ecore_hwfn *p_hwfn,
2141 									struct ecore_ptt *p_ptt,
2142 									u32 *dump_buf,
2143 									bool dump)
2144 {
2145 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2146 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2147 
2148 	if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2149 		u32 public_data_addr, global_section_offsize_addr, global_section_offsize, global_section_addr, mfw_ver;
2150 
2151 		/* Find MCP public data GRC address. Needs to be ORed with
2152 		 * MCP_REG_SCRATCH due to a HW bug.
2153 		 */
2154 		public_data_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR) | MCP_REG_SCRATCH;
2155 
2156 		/* Find MCP public global section offset */
2157 		global_section_offsize_addr = public_data_addr + OFFSETOF(struct mcp_public_data, sections) + sizeof(offsize_t) * PUBLIC_GLOBAL;
2158 		global_section_offsize = ecore_rd(p_hwfn, p_ptt, global_section_offsize_addr);
2159 		global_section_addr = MCP_REG_SCRATCH + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2160 
2161 		/* Read MFW version from MCP public global section */
2162 		mfw_ver = ecore_rd(p_hwfn, p_ptt, global_section_addr + OFFSETOF(struct public_global, mfw_ver));
2163 
2164 		/* Dump MFW version param */
2165 		if (OSAL_SNPRINTF(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2166 			DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid MFW version string\n");
2167 	}
2168 
2169 	return ecore_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2170 }
2171 
2172 /* Writes a section header to the specified buffer.
2173  * Returns the dumped size in dwords.
2174  */
2175 static u32 ecore_dump_section_hdr(u32 *dump_buf,
2176 								  bool dump,
2177 								  const char *name,
2178 								  u32 num_params)
2179 {
2180 	return ecore_dump_num_param(dump_buf, dump, name, num_params);
2181 }
2182 
2183 /* Writes the common global params to the specified buffer.
2184  * Returns the dumped size in dwords.
2185  */
2186 static u32 ecore_dump_common_global_params(struct ecore_hwfn *p_hwfn,
2187 										   struct ecore_ptt *p_ptt,
2188 										   u32 *dump_buf,
2189 										   bool dump,
2190 										   u8 num_specific_global_params)
2191 {
2192 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2193 	u32 offset = 0;
2194 	u8 num_params;
2195 
2196 	/* Dump global params section header */
2197 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2198 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params);
2199 
2200 	/* Store params */
2201 	offset += ecore_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2202 	offset += ecore_dump_mfw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2203 	offset += ecore_dump_num_param(dump_buf + offset, dump, "tools-version", TOOLS_VERSION);
2204 	offset += ecore_dump_str_param(dump_buf + offset, dump, "chip", s_chip_defs[dev_data->chip_id].name);
2205 	offset += ecore_dump_str_param(dump_buf + offset, dump, "platform", s_platform_defs[dev_data->platform_id].name);
2206 	offset += ecore_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id);
2207 
2208 	return offset;
2209 }
2210 
2211 /* Writes the "last" section (including CRC) to the specified buffer at the
2212  * given offset. Returns the dumped size in dwords.
2213  */
2214 static u32 ecore_dump_last_section(u32 *dump_buf,
2215 								   u32 offset,
2216 								   bool dump)
2217 {
2218 	u32 start_offset = offset;
2219 
2220 	/* Dump CRC section header */
2221 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2222 
2223 	/* Calculate CRC32 and add it to the dword after the "last" section */
2224 	if (dump)
2225 		*(dump_buf + offset) = ~OSAL_CRC32(0xffffffff, (u8 *)dump_buf, DWORDS_TO_BYTES(offset));
2226 
2227 	offset++;
2228 
2229 	return offset - start_offset;
2230 }
2231 
2232 /* Update blocks reset state  */
2233 static void ecore_update_blocks_reset_state(struct ecore_hwfn *p_hwfn,
2234 											struct ecore_ptt *p_ptt)
2235 {
2236 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2237 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2238 	u32 i;
2239 
2240 	/* Read reset registers */
2241 	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2242 		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2243 			reg_val[i] = ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[i].addr);
2244 
2245 	/* Check if blocks are in reset */
2246 	for (i = 0; i < MAX_BLOCK_ID; i++) {
2247 		struct block_defs *block = s_block_defs[i];
2248 
2249 		dev_data->block_in_reset[i] = block->has_reset_bit && !(reg_val[block->reset_reg] & (1 << block->reset_bit_offset));
2250 	}
2251 }
2252 
2253 /* Enable / disable the Debug block */
2254 static void ecore_bus_enable_dbg_block(struct ecore_hwfn *p_hwfn,
2255 									   struct ecore_ptt *p_ptt,
2256 									   bool enable)
2257 {
2258 	ecore_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2259 }
2260 
2261 /* Resets the Debug block */
2262 static void ecore_bus_reset_dbg_block(struct ecore_hwfn *p_hwfn,
2263 									  struct ecore_ptt *p_ptt)
2264 {
2265 	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2266 	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2267 
2268 	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2269 	old_reset_reg_val = ecore_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2270 	new_reset_reg_val = old_reset_reg_val & ~(1 << dbg_block->reset_bit_offset);
2271 
2272 	ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2273 	ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2274 }
2275 
2276 static void ecore_bus_set_framing_mode(struct ecore_hwfn *p_hwfn,
2277 									   struct ecore_ptt *p_ptt,
2278 									   enum dbg_bus_frame_modes mode)
2279 {
2280 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2281 }
2282 
2283 /* Enable / disable Debug Bus clients according to the specified mask
2284  * (1 = enable, 0 = disable).
2285  */
2286 static void ecore_bus_enable_clients(struct ecore_hwfn *p_hwfn,
2287 									 struct ecore_ptt *p_ptt,
2288 									 u32 client_mask)
2289 {
2290 	ecore_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2291 }
2292 
2293 /* Enables the specified Storm for Debug Bus. Assumes a valid Storm ID. */
2294 static void ecore_bus_enable_storm(struct ecore_hwfn *p_hwfn,
2295 								   struct ecore_ptt *p_ptt,
2296 								   enum dbg_storms storm_id)
2297 {
2298 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2299 	u32 base_addr, sem_filter_params = 0;
2300 	struct dbg_bus_storm_data *storm_bus;
2301 	struct storm_mode_defs *storm_mode;
2302 	struct storm_defs *storm;
2303 
2304 	storm = &s_storm_defs[storm_id];
2305 	storm_bus = &dev_data->bus.storms[storm_id];
2306 	storm_mode = &s_storm_mode_defs[storm_bus->mode];
2307 	base_addr = storm->sem_fast_mem_addr;
2308 
2309 	/* Config SEM */
2310 	if (storm_mode->is_fast_dbg) {
2311 
2312 		/* Enable fast debug */
2313 		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST);
2314 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_MODE, storm_mode->id_in_hw);
2315 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 1);
2316 
2317 		/* Enable messages. Must be done after enabling
2318 		 * SEM_FAST_REG_DEBUG_ACTIVE, otherwise messages will
2319 		 * be dropped after the SEMI sync fifo is filled.
2320 		 */
2321 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE23_SRC_DISABLE, SEM_FAST_MODE23_SRC_ENABLE_VAL);
2322 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE4_SRC_DISABLE, SEM_FAST_MODE4_SRC_ENABLE_VAL);
2323 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_ENABLE_VAL);
2324 	}
2325 	else {
2326 
2327 		/* Enable slow debug */
2328 		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST);
2329 		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 1);
2330 		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode_addr, storm_mode->id_in_hw);
2331 		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode1_conf_addr, SEM_SLOW_MODE1_DATA_ENABLE);
2332 	}
2333 
2334 	/* Config SEM cid filter */
2335 	if (storm_bus->cid_filter_en) {
2336 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_CID, storm_bus->cid);
2337 		sem_filter_params |= SEM_FILTER_CID_EN_MASK;
2338 	}
2339 
2340 	/* Config SEM eid filter */
2341 	if (storm_bus->eid_filter_en) {
2342 		const union dbg_bus_storm_eid_params *eid_filter = &storm_bus->eid_filter_params;
2343 
2344 		if (storm_bus->eid_range_not_mask) {
2345 			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_STRT, eid_filter->range.min);
2346 			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_END, eid_filter->range.max);
2347 			sem_filter_params |= SEM_FILTER_EID_RANGE_EN_MASK;
2348 		}
2349 		else {
2350 			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_EVENT_ID, eid_filter->mask.val);
2351 			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_MASK, ~eid_filter->mask.mask);
2352 			sem_filter_params |= SEM_FILTER_EID_MASK_EN_MASK;
2353 		}
2354 	}
2355 
2356 	/* Config accumulaed SEM filter parameters (if any) */
2357 	if (sem_filter_params)
2358 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, sem_filter_params);
2359 }
2360 
2361 /* Disables Debug Bus block inputs */
2362 static enum dbg_status ecore_bus_disable_inputs(struct ecore_hwfn *p_hwfn,
2363 												struct ecore_ptt *p_ptt,
2364 												bool empty_semi_fifos)
2365 {
2366 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2367 	u8 storm_id, num_fifos_to_empty = MAX_DBG_STORMS;
2368 	bool is_fifo_empty[MAX_DBG_STORMS] = { false };
2369 	u32 block_id;
2370 
2371 	/* Disable messages output in all Storms */
2372 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2373 		struct storm_defs *storm = &s_storm_defs[storm_id];
2374 
2375 		if (dev_data->block_in_reset[storm->block_id])
2376 			continue;
2377 
2378 		ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE23_SRC_DISABLE, SEM_FAST_MODE23_SRC_DISABLE_VAL);
2379 		ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE4_SRC_DISABLE, SEM_FAST_MODE4_SRC_DISABLE_VAL);
2380 		ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_DISABLE_VAL);
2381 	}
2382 
2383 	/* Try to empty the SEMI sync fifo. Must be done after messages output
2384 	 * were disabled in all Storms.
2385 	 */
2386 	while (num_fifos_to_empty) {
2387 		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2388 			struct storm_defs *storm = &s_storm_defs[storm_id];
2389 
2390 			if (is_fifo_empty[storm_id])
2391 				continue;
2392 
2393 			/* Check if sync fifo got empty */
2394 			if (dev_data->block_in_reset[storm->block_id] || ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr)) {
2395 				is_fifo_empty[storm_id] = true;
2396 				num_fifos_to_empty--;
2397 			}
2398 		}
2399 
2400 		/* Check if need to continue polling */
2401 		if (num_fifos_to_empty) {
2402 			u32 polling_ms = SEMI_SYNC_FIFO_POLLING_DELAY_MS * s_platform_defs[dev_data->platform_id].delay_factor;
2403 			u32 polling_count = 0;
2404 
2405 			if (empty_semi_fifos && polling_count < SEMI_SYNC_FIFO_POLLING_COUNT) {
2406 				OSAL_MSLEEP(polling_ms);
2407 				polling_count++;
2408 			}
2409 			else {
2410 				DP_NOTICE(p_hwfn, false, "Warning: failed to empty the SEMI sync FIFO. It means that the last few messages from the SEMI could not be sent to the DBG block. This can happen when the DBG block is blocked (e.g. due to a PCI problem).\n");
2411 				break;
2412 			}
2413 		}
2414 	}
2415 
2416 	/* Disable debug in all Storms */
2417 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2418 		struct storm_defs *storm = &s_storm_defs[storm_id];
2419 		u32 base_addr = storm->sem_fast_mem_addr;
2420 
2421 		if (dev_data->block_in_reset[storm->block_id])
2422 			continue;
2423 
2424 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 0);
2425 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2426 		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_FRAME_MODE_4HW_0ST);
2427 		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 0);
2428 	}
2429 
2430 	/* Disable all clients */
2431 	ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
2432 
2433 	/* Disable all blocks */
2434 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2435 		struct block_defs *block = s_block_defs[block_id];
2436 
2437 		if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS && !dev_data->block_in_reset[block_id])
2438 			ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
2439 	}
2440 
2441 	/* Disable timestamp */
2442 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, 0);
2443 
2444 	/* Disable filters and triggers */
2445 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2446 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 0);
2447 
2448 	return DBG_STATUS_OK;
2449 }
2450 
2451 /* Sets a Debug Bus trigger/filter constraint */
2452 static void ecore_bus_set_constraint(struct ecore_hwfn *p_hwfn,
2453 									 struct ecore_ptt *p_ptt,
2454 									 bool is_filter,
2455 									 u8 constraint_id,
2456 									 u8 hw_op_val,
2457 									 u32 data_val,
2458 									 u32 data_mask,
2459 									 u8 frame_bit,
2460 									 u8 frame_mask,
2461 									 u16 dword_offset,
2462 									 u16 range,
2463 									 u8 cyclic_bit,
2464 									 u8 must_bit)
2465 {
2466 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2467 	u32 reg_offset = constraint_id * BYTES_IN_DWORD;
2468 	u8 curr_trigger_state;
2469 
2470 	/* For trigger only - set register offset according to state */
2471 	if (!is_filter) {
2472 		curr_trigger_state = dev_data->bus.next_trigger_state - 1;
2473 		reg_offset += curr_trigger_state * TRIGGER_SETS_PER_STATE * MAX_CONSTRAINTS * BYTES_IN_DWORD;
2474 	}
2475 
2476 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OPRTN_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0) + reg_offset, hw_op_val);
2477 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0) + reg_offset, data_val);
2478 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0) + reg_offset, data_mask);
2479 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0) + reg_offset, frame_bit);
2480 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0) + reg_offset, frame_mask);
2481 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OFFSET_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0) + reg_offset, dword_offset);
2482 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_RANGE_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0) + reg_offset, range);
2483 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_CYCLIC_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0) + reg_offset, cyclic_bit);
2484 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_MUST_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0) + reg_offset, must_bit);
2485 }
2486 
2487 /* Reads the specified DBG Bus internal buffer range and copy it to the
2488  * specified buffer. Returns the dumped size in dwords.
2489  */
2490 static u32 ecore_bus_dump_int_buf_range(struct ecore_hwfn *p_hwfn,
2491 										struct ecore_ptt *p_ptt,
2492 										u32 *dump_buf,
2493 										bool dump,
2494 										u32 start_line,
2495 										u32 end_line)
2496 {
2497 	u32 line, reg_addr, i, offset = 0;
2498 
2499 	if (!dump)
2500 		return (end_line - start_line + 1) * INT_BUF_LINE_SIZE_IN_DWORDS;
2501 
2502 	for (line = start_line, reg_addr = DBG_REG_INTR_BUFFER + DWORDS_TO_BYTES(start_line * INT_BUF_LINE_SIZE_IN_DWORDS);
2503 		line <= end_line;
2504 		line++, offset += INT_BUF_LINE_SIZE_IN_DWORDS)
2505 		for (i = 0; i < INT_BUF_LINE_SIZE_IN_DWORDS; i++, reg_addr += BYTES_IN_DWORD)
2506 			dump_buf[offset + INT_BUF_LINE_SIZE_IN_DWORDS - 1 - i] = ecore_rd(p_hwfn, p_ptt, reg_addr);
2507 
2508 	return offset;
2509 }
2510 
2511 /* Reads the DBG Bus internal buffer and copy its contents to a buffer.
2512  * Returns the dumped size in dwords.
2513  */
2514 static u32 ecore_bus_dump_int_buf(struct ecore_hwfn *p_hwfn,
2515 								  struct ecore_ptt *p_ptt,
2516 								  u32 *dump_buf,
2517 								  bool dump)
2518 {
2519 	u32 last_written_line, offset = 0;
2520 
2521 	last_written_line = ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_WR_PTR);
2522 
2523 	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_INT_BUFFER)) {
2524 
2525 		/* Internal buffer was wrapped: first dump from write pointer
2526 		 * to buffer end, then dump from buffer start to write pointer.
2527 		 */
2528 		if (last_written_line < INT_BUF_NUM_OF_LINES - 1)
2529 			offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, last_written_line + 1, INT_BUF_NUM_OF_LINES - 1);
2530 		offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
2531 	}
2532 	else if (last_written_line) {
2533 
2534 		/* Internal buffer wasn't wrapped: dump from buffer start until
2535 		 *  write pointer.
2536 		 */
2537 		if (!ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_RD_PTR))
2538 			offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
2539 		else
2540 			DP_NOTICE(p_hwfn, true, "Unexpected Debug Bus error: internal buffer read pointer is not zero\n");
2541 	}
2542 
2543 	return offset;
2544 }
2545 
2546 /* Reads the specified DBG Bus PCI buffer range and copy it to the specified
2547  * buffer. Returns the dumped size in dwords.
2548  */
2549 static u32 ecore_bus_dump_pci_buf_range(struct ecore_hwfn *p_hwfn,
2550 										u32 *dump_buf,
2551 										bool dump,
2552 										u32 start_line,
2553 										u32 end_line)
2554 {
2555 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2556 	u32 offset = 0;
2557 
2558 	/* Extract PCI buffer pointer from virtual address */
2559 	void *virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2560 	u32 *pci_buf_start = (u32 *)(osal_uintptr_t)*((u64 *)virt_addr_lo);
2561 	u32 *pci_buf, line, i;
2562 
2563 	if (!dump)
2564 		return (end_line - start_line + 1) * PCI_BUF_LINE_SIZE_IN_DWORDS;
2565 
2566 	for (line = start_line, pci_buf = pci_buf_start + start_line * PCI_BUF_LINE_SIZE_IN_DWORDS;
2567 	line <= end_line;
2568 		line++, offset += PCI_BUF_LINE_SIZE_IN_DWORDS)
2569 		for (i = 0; i < PCI_BUF_LINE_SIZE_IN_DWORDS; i++, pci_buf++)
2570 			dump_buf[offset + s_pci_buf_line_ind[i]] = *pci_buf;
2571 
2572 	return offset;
2573 }
2574 
2575 /* Copies the DBG Bus PCI buffer to the specified buffer.
2576  * Returns the dumped size in dwords.
2577  */
2578 static u32 ecore_bus_dump_pci_buf(struct ecore_hwfn *p_hwfn,
2579 								  struct ecore_ptt *p_ptt,
2580 								  u32 *dump_buf,
2581 								  bool dump)
2582 {
2583 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2584 	u32 next_wr_byte_offset, next_wr_line_offset;
2585 	struct dbg_bus_mem_addr next_wr_phys_addr;
2586 	u32 pci_buf_size_in_lines, offset = 0;
2587 
2588 	pci_buf_size_in_lines = dev_data->bus.pci_buf.size / PCI_BUF_LINE_SIZE_IN_BYTES;
2589 
2590 	/* Extract write pointer (physical address) */
2591 	next_wr_phys_addr.lo = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR);
2592 	next_wr_phys_addr.hi = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR + BYTES_IN_DWORD);
2593 
2594 	/* Convert write pointer to offset */
2595 	next_wr_byte_offset = ecore_phys_addr_diff(&next_wr_phys_addr, &dev_data->bus.pci_buf.phys_addr);
2596 	if ((next_wr_byte_offset % PCI_BUF_LINE_SIZE_IN_BYTES) || next_wr_byte_offset > dev_data->bus.pci_buf.size)
2597 		return 0;
2598 	next_wr_line_offset = next_wr_byte_offset / PCI_BUF_LINE_SIZE_IN_BYTES;
2599 
2600 	/* PCI buffer wrapped: first dump from write pointer to buffer end. */
2601 	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_EXT_BUFFER))
2602 		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, next_wr_line_offset, pci_buf_size_in_lines - 1);
2603 
2604 	/* Dump from buffer start until write pointer */
2605 	if (next_wr_line_offset)
2606 		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, 0, next_wr_line_offset - 1);
2607 
2608 	return offset;
2609 }
2610 
2611 /* Copies the DBG Bus recorded data to the specified buffer.
2612  * Returns the dumped size in dwords.
2613  */
2614 static u32 ecore_bus_dump_data(struct ecore_hwfn *p_hwfn,
2615 							   struct ecore_ptt *p_ptt,
2616 							   u32 *dump_buf,
2617 							   bool dump)
2618 {
2619 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2620 
2621 	switch (dev_data->bus.target) {
2622 	case DBG_BUS_TARGET_ID_INT_BUF:
2623 		return ecore_bus_dump_int_buf(p_hwfn, p_ptt, dump_buf, dump);
2624 	case DBG_BUS_TARGET_ID_PCI:
2625 		return ecore_bus_dump_pci_buf(p_hwfn, p_ptt, dump_buf, dump);
2626 	default:
2627 		break;
2628 	}
2629 
2630 	return 0;
2631 }
2632 
2633 /* Frees the Debug Bus PCI buffer */
2634 static void ecore_bus_free_pci_buf(struct ecore_hwfn *p_hwfn)
2635 {
2636 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2637 	dma_addr_t pci_buf_phys_addr;
2638 	void *virt_addr_lo;
2639 	u32 *pci_buf;
2640 
2641 	/* Extract PCI buffer pointer from virtual address */
2642 	virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2643 	pci_buf = (u32 *)(osal_uintptr_t)*((u64 *)virt_addr_lo);
2644 
2645 	if (!dev_data->bus.pci_buf.size)
2646 		return;
2647 
2648 	OSAL_MEMCPY(&pci_buf_phys_addr, &dev_data->bus.pci_buf.phys_addr, sizeof(pci_buf_phys_addr));
2649 
2650 	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, pci_buf, pci_buf_phys_addr, dev_data->bus.pci_buf.size);
2651 
2652 	dev_data->bus.pci_buf.size = 0;
2653 }
2654 
2655 /* Dumps the list of DBG Bus inputs (blocks/Storms) to the specified buffer.
2656  * Returns the dumped size in dwords.
2657  */
2658 static u32 ecore_bus_dump_inputs(struct ecore_hwfn *p_hwfn,
2659 								 u32 *dump_buf,
2660 								 bool dump)
2661 {
2662 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2663 	char storm_name[8] = "?storm";
2664 	u32 block_id, offset = 0;
2665 	u8 storm_id;
2666 
2667 	/* Store storms */
2668 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2669 		struct dbg_bus_storm_data *storm_bus = &dev_data->bus.storms[storm_id];
2670 		struct storm_defs *storm = &s_storm_defs[storm_id];
2671 
2672 		if (!dev_data->bus.storms[storm_id].enabled)
2673 			continue;
2674 
2675 		/* Dump section header */
2676 		storm_name[0] = storm->letter;
2677 		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 3);
2678 		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", storm_name);
2679 		offset += ecore_dump_num_param(dump_buf + offset, dump, "id", storm_bus->hw_id);
2680 		offset += ecore_dump_str_param(dump_buf + offset, dump, "mode", s_storm_mode_defs[storm_bus->mode].name);
2681 	}
2682 
2683 	/* Store blocks */
2684 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2685 		struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
2686 		struct block_defs *block = s_block_defs[block_id];
2687 
2688 		if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
2689 			continue;
2690 
2691 		/* Dump section header */
2692 		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 4);
2693 		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", block->name);
2694 		offset += ecore_dump_num_param(dump_buf + offset, dump, "line", block_bus->line_num);
2695 		offset += ecore_dump_num_param(dump_buf + offset, dump, "en", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK));
2696 		offset += ecore_dump_num_param(dump_buf + offset, dump, "shr", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
2697 	}
2698 
2699 	return offset;
2700 }
2701 
2702 /* Dumps the Debug Bus header (params, inputs, data header) to the specified
2703  * buffer. Returns the dumped size in dwords.
2704  */
2705 static u32 ecore_bus_dump_hdr(struct ecore_hwfn *p_hwfn,
2706 							  struct ecore_ptt *p_ptt,
2707 							  u32 *dump_buf,
2708 							  bool dump)
2709 {
2710 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2711 	char hw_id_mask_str[16];
2712 	u32 offset = 0;
2713 
2714 	if (OSAL_SNPRINTF(hw_id_mask_str, sizeof(hw_id_mask_str), "0x%x", dev_data->bus.hw_id_mask) < 0)
2715 		DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid HW ID mask\n");
2716 
2717 	/* Dump global params */
2718 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 5);
2719 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "debug-bus");
2720 	offset += ecore_dump_str_param(dump_buf + offset, dump, "wrap-mode", dev_data->bus.one_shot_en ? "one-shot" : "wrap-around");
2721 	offset += ecore_dump_num_param(dump_buf + offset, dump, "hw-dwords", dev_data->bus.hw_dwords);
2722 	offset += ecore_dump_str_param(dump_buf + offset, dump, "hw-id-mask", hw_id_mask_str);
2723 	offset += ecore_dump_str_param(dump_buf + offset, dump, "target", s_dbg_target_names[dev_data->bus.target]);
2724 
2725 	offset += ecore_bus_dump_inputs(p_hwfn, dump_buf + offset, dump);
2726 
2727 	if (dev_data->bus.target != DBG_BUS_TARGET_ID_NIG) {
2728 		u32 recorded_dwords = 0;
2729 
2730 		if (dump)
2731 			recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, OSAL_NULL, false);
2732 
2733 		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_data", 1);
2734 		offset += ecore_dump_num_param(dump_buf + offset, dump, "size", recorded_dwords);
2735 	}
2736 
2737 	return offset;
2738 }
2739 
2740 static bool ecore_is_mode_match(struct ecore_hwfn *p_hwfn,
2741 								u16 *modes_buf_offset)
2742 {
2743 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2744 	bool arg1, arg2;
2745 	u8 tree_val;
2746 
2747 	/* Get next element from modes tree buffer */
2748 	tree_val = ((u8 *)s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr)[(*modes_buf_offset)++];
2749 
2750 	switch (tree_val) {
2751 	case INIT_MODE_OP_NOT:
2752 		return !ecore_is_mode_match(p_hwfn, modes_buf_offset);
2753 	case INIT_MODE_OP_OR:
2754 	case INIT_MODE_OP_AND:
2755 		arg1 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2756 		arg2 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2757 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 || arg2) : (arg1 && arg2);
2758 	default: return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2759 	}
2760 }
2761 
2762 /* Returns true if the specified entity (indicated by GRC param) should be
2763  * included in the dump, false otherwise.
2764  */
2765 static bool ecore_grc_is_included(struct ecore_hwfn *p_hwfn,
2766 								  enum dbg_grc_params grc_param)
2767 {
2768 	return ecore_grc_get_param(p_hwfn, grc_param) > 0;
2769 }
2770 
2771 /* Returns true of the specified Storm should be included in the dump, false
2772  * otherwise.
2773  */
2774 static bool ecore_grc_is_storm_included(struct ecore_hwfn *p_hwfn,
2775 										enum dbg_storms storm)
2776 {
2777 	return ecore_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2778 }
2779 
2780 /* Returns true if the specified memory should be included in the dump, false
2781  * otherwise.
2782  */
2783 static bool ecore_grc_is_mem_included(struct ecore_hwfn *p_hwfn,
2784 									  enum block_id block_id,
2785 									  u8 mem_group_id)
2786 {
2787 	struct block_defs *block = s_block_defs[block_id];
2788 	u8 i;
2789 
2790 	/* Check Storm match */
2791 	if (block->associated_to_storm &&
2792 		!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)block->storm_id))
2793 		return false;
2794 
2795 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2796 		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2797 
2798 		if (mem_group_id == big_ram->mem_group_id || mem_group_id == big_ram->ram_mem_group_id)
2799 			return ecore_grc_is_included(p_hwfn, big_ram->grc_param);
2800 	}
2801 
2802 	switch (mem_group_id) {
2803 	case MEM_GROUP_PXP_ILT:
2804 	case MEM_GROUP_PXP_MEM:
2805 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2806 	case MEM_GROUP_RAM:
2807 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2808 	case MEM_GROUP_PBUF:
2809 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2810 	case MEM_GROUP_CAU_MEM:
2811 	case MEM_GROUP_CAU_SB:
2812 	case MEM_GROUP_CAU_PI:
2813 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2814 	case MEM_GROUP_QM_MEM:
2815 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2816 	case MEM_GROUP_CFC_MEM:
2817 	case MEM_GROUP_CONN_CFC_MEM:
2818 	case MEM_GROUP_TASK_CFC_MEM:
2819 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2820 	case MEM_GROUP_IGU_MEM:
2821 	case MEM_GROUP_IGU_MSIX:
2822 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2823 	case MEM_GROUP_MULD_MEM:
2824 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2825 	case MEM_GROUP_PRS_MEM:
2826 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2827 	case MEM_GROUP_DMAE_MEM:
2828 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2829 	case MEM_GROUP_TM_MEM:
2830 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2831 	case MEM_GROUP_SDM_MEM:
2832 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2833 	case MEM_GROUP_TDIF_CTX:
2834 	case MEM_GROUP_RDIF_CTX:
2835 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2836 	case MEM_GROUP_CM_MEM:
2837 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2838 	case MEM_GROUP_IOR:
2839 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2840 	default:
2841 		return true;
2842 	}
2843 }
2844 
2845 /* Stalls all Storms */
2846 static void ecore_grc_stall_storms(struct ecore_hwfn *p_hwfn,
2847 								   struct ecore_ptt *p_ptt,
2848 								   bool stall)
2849 {
2850 	u32 reg_addr;
2851 	u8 storm_id;
2852 
2853 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2854 		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
2855 			continue;
2856 
2857 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + SEM_FAST_REG_STALL_0_BB_K2;
2858 		ecore_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2859 	}
2860 
2861 	OSAL_MSLEEP(STALL_DELAY_MS);
2862 }
2863 
2864 /* Takes all blocks out of reset */
2865 static void ecore_grc_unreset_blocks(struct ecore_hwfn *p_hwfn,
2866 									 struct ecore_ptt *p_ptt)
2867 {
2868 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2869 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2870 	u32 block_id, i;
2871 
2872 	/* Fill reset regs values */
2873 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2874 		struct block_defs *block = s_block_defs[block_id];
2875 
2876 		if (block->exists[dev_data->chip_id] && block->has_reset_bit && block->unreset)
2877 			reg_val[block->reset_reg] |= (1 << block->reset_bit_offset);
2878 	}
2879 
2880 	/* Write reset registers */
2881 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2882 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2883 			continue;
2884 
2885 		reg_val[i] |= s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2886 
2887 		if (reg_val[i])
2888 			ecore_wr(p_hwfn, p_ptt, s_reset_regs_defs[i].addr + RESET_REG_UNRESET_OFFSET, reg_val[i]);
2889 	}
2890 }
2891 
2892 /* Returns the attention block data of the specified block */
2893 static const struct dbg_attn_block_type_data* ecore_get_block_attn_data(enum block_id block_id,
2894 																		enum dbg_attn_type attn_type)
2895 {
2896 	const struct dbg_attn_block *base_attn_block_arr = (const struct dbg_attn_block *)s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2897 
2898 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2899 }
2900 
2901 /* Returns the attention registers of the specified block */
2902 static const struct dbg_attn_reg* ecore_get_block_attn_regs(enum block_id block_id,
2903 															enum dbg_attn_type attn_type,
2904 															u8 *num_attn_regs)
2905 {
2906 	const struct dbg_attn_block_type_data *block_type_data = ecore_get_block_attn_data(block_id, attn_type);
2907 
2908 	*num_attn_regs = block_type_data->num_regs;
2909 
2910 	return &((const struct dbg_attn_reg *)s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->regs_offset];
2911 }
2912 
2913 /* For each block, clear the status of all parities */
2914 static void ecore_grc_clear_all_prty(struct ecore_hwfn *p_hwfn,
2915 									 struct ecore_ptt *p_ptt)
2916 {
2917 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2918 	const struct dbg_attn_reg *attn_reg_arr;
2919 	u8 reg_idx, num_attn_regs;
2920 	u32 block_id;
2921 
2922 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2923 		if (dev_data->block_in_reset[block_id])
2924 			continue;
2925 
2926 		attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
2927 
2928 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2929 			const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
2930 			u16 modes_buf_offset;
2931 			bool eval_mode;
2932 
2933 			/* Check mode */
2934 			eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
2935 			modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
2936 
2937 			/* If Mode match: clear parity status */
2938 			if (!eval_mode || ecore_is_mode_match(p_hwfn, &modes_buf_offset))
2939 				ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->sts_clr_address));
2940 		}
2941 	}
2942 }
2943 
2944 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2945  * the following parameters are dumped:
2946  * - count:	 no. of dumped entries
2947  * - split:	 split type
2948  * - id:	 split ID (dumped only if split_id >= 0)
2949  * - param_name: user parameter value (dumped only if param_name != OSAL_NULL
2950  *		 and param_val != OSAL_NULL).
2951  */
2952 static u32 ecore_grc_dump_regs_hdr(u32 *dump_buf,
2953 								   bool dump,
2954 								   u32 num_reg_entries,
2955 								   const char *split_type,
2956 								   int split_id,
2957 								   const char *param_name,
2958 								   const char *param_val)
2959 {
2960 	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2961 	u32 offset = 0;
2962 
2963 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_regs", num_params);
2964 	offset += ecore_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries);
2965 	offset += ecore_dump_str_param(dump_buf + offset, dump, "split", split_type);
2966 	if (split_id >= 0)
2967 		offset += ecore_dump_num_param(dump_buf + offset, dump, "id", split_id);
2968 	if (param_name && param_val)
2969 		offset += ecore_dump_str_param(dump_buf + offset, dump, param_name, param_val);
2970 
2971 	return offset;
2972 }
2973 
2974 /* Reads the specified registers into the specified buffer.
2975  * The addr and len arguments are specified in dwords.
2976  */
2977 void ecore_read_regs(struct ecore_hwfn *p_hwfn,
2978 					 struct ecore_ptt *p_ptt,
2979 					 u32 *buf,
2980 					 u32 addr,
2981 					 u32 len)
2982 {
2983 	u32 i;
2984 
2985 	for (i = 0; i < len; i++)
2986 		buf[i] = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2987 }
2988 
2989 /* Dumps the GRC registers in the specified address range.
2990  * Returns the dumped size in dwords.
2991  * The addr and len arguments are specified in dwords.
2992  */
2993 static u32 ecore_grc_dump_addr_range(struct ecore_hwfn *p_hwfn,
2994 									 struct ecore_ptt *p_ptt,
2995 									 u32 *dump_buf,
2996 									 bool dump,
2997 									 u32 addr,
2998 									 u32 len,
2999 									 bool wide_bus)
3000 {
3001 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3002 
3003 	if (!dump)
3004 		return len;
3005 
3006 	/* Print log if needed */
3007 	dev_data->num_regs_read += len;
3008 	if (dev_data->num_regs_read >= s_platform_defs[dev_data->platform_id].log_thresh) {
3009 		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers...\n", dev_data->num_regs_read);
3010 		dev_data->num_regs_read = 0;
3011 	}
3012 
3013 	/* Try reading using DMAE */
3014 	if (dev_data->use_dmae && (len >= s_platform_defs[dev_data->platform_id].dmae_thresh || (PROTECT_WIDE_BUS && wide_bus))) {
3015 		if (!ecore_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), (u64)(osal_uintptr_t)(dump_buf), len, OSAL_NULL))
3016 			return len;
3017 		dev_data->use_dmae = 0;
3018 		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Failed reading from chip using DMAE, using GRC instead\n");
3019 	}
3020 
3021 	/* Read registers */
3022 	ecore_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
3023 
3024 	return len;
3025 }
3026 
3027 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
3028  * The addr and len arguments are specified in dwords.
3029  */
3030 static u32 ecore_grc_dump_reg_entry_hdr(u32 *dump_buf,
3031 										bool dump,
3032 										u32 addr,
3033 										u32 len)
3034 {
3035 	if (dump)
3036 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
3037 
3038 	return 1;
3039 }
3040 
3041 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
3042  * The addr and len arguments are specified in dwords.
3043  */
3044 static u32 ecore_grc_dump_reg_entry(struct ecore_hwfn *p_hwfn,
3045 									struct ecore_ptt *p_ptt,
3046 									u32 *dump_buf,
3047 									bool dump,
3048 									u32 addr,
3049 									u32 len,
3050 									bool wide_bus)
3051 {
3052 	u32 offset = 0;
3053 
3054 	offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
3055 	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3056 
3057 	return offset;
3058 }
3059 
3060 /* Dumps GRC registers sequence with skip cycle.
3061  * Returns the dumped size in dwords.
3062  * - addr:	start GRC address in dwords
3063  * - total_len:	total no. of dwords to dump
3064  * - read_len:	no. consecutive dwords to read
3065  * - skip_len:	no. of dwords to skip (and fill with zeros)
3066  */
3067 static u32 ecore_grc_dump_reg_entry_skip(struct ecore_hwfn *p_hwfn,
3068 										 struct ecore_ptt *p_ptt,
3069 										 u32 *dump_buf,
3070 										 bool dump,
3071 										 u32 addr,
3072 										 u32 total_len,
3073 										 u32 read_len,
3074 										 u32 skip_len)
3075 {
3076 	u32 offset = 0, reg_offset = 0;
3077 
3078 	offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
3079 
3080 	if (!dump)
3081 		return offset + total_len;
3082 
3083 	while (reg_offset < total_len) {
3084 		u32 curr_len = OSAL_MIN_T(u32, read_len, total_len - reg_offset);
3085 
3086 		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, curr_len, false);
3087 		reg_offset += curr_len;
3088 		addr += curr_len;
3089 
3090 		if (reg_offset < total_len) {
3091 			curr_len = OSAL_MIN_T(u32, skip_len, total_len - skip_len);
3092 			OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
3093 			offset += curr_len;
3094 			reg_offset += curr_len;
3095 			addr += curr_len;
3096 		}
3097 	}
3098 
3099 	return offset;
3100 }
3101 
3102 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
3103 static u32 ecore_grc_dump_regs_entries(struct ecore_hwfn *p_hwfn,
3104 									   struct ecore_ptt *p_ptt,
3105 									   struct dbg_array input_regs_arr,
3106 									   u32 *dump_buf,
3107 									   bool dump,
3108 									   bool block_enable[MAX_BLOCK_ID],
3109 									   u32 *num_dumped_reg_entries)
3110 {
3111 	u32 i, offset = 0, input_offset = 0;
3112 	bool mode_match = true;
3113 
3114 	*num_dumped_reg_entries = 0;
3115 
3116 	while (input_offset < input_regs_arr.size_in_dwords) {
3117 		const struct dbg_dump_cond_hdr *cond_hdr = (const struct dbg_dump_cond_hdr *)&input_regs_arr.ptr[input_offset++];
3118 		u16 modes_buf_offset;
3119 		bool eval_mode;
3120 
3121 		/* Check mode/block */
3122 		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3123 		if (eval_mode) {
3124 			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3125 			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
3126 		}
3127 
3128 		if (!mode_match || !block_enable[cond_hdr->block_id]) {
3129 			input_offset += cond_hdr->data_size;
3130 			continue;
3131 		}
3132 
3133 		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
3134 			const struct dbg_dump_reg *reg = (const struct dbg_dump_reg *)&input_regs_arr.ptr[input_offset];
3135 
3136 			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3137 				GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS),
3138 				GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH),
3139 				GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS));
3140 			(*num_dumped_reg_entries)++;
3141 		}
3142 	}
3143 
3144 	return offset;
3145 }
3146 
3147 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
3148 static u32 ecore_grc_dump_split_data(struct ecore_hwfn *p_hwfn,
3149 									 struct ecore_ptt *p_ptt,
3150 									 struct dbg_array input_regs_arr,
3151 									 u32 *dump_buf,
3152 									 bool dump,
3153 									 bool block_enable[MAX_BLOCK_ID],
3154 									 const char *split_type_name,
3155 									 u32 split_id,
3156 									 const char *param_name,
3157 									 const char *param_val)
3158 {
3159 	u32 num_dumped_reg_entries, offset;
3160 
3161 	/* Calculate register dump header size (and skip it for now) */
3162 	offset = ecore_grc_dump_regs_hdr(dump_buf, false, 0, split_type_name, split_id, param_name, param_val);
3163 
3164 	/* Dump registers */
3165 	offset += ecore_grc_dump_regs_entries(p_hwfn, p_ptt, input_regs_arr, dump_buf + offset, dump, block_enable, &num_dumped_reg_entries);
3166 
3167 	/* Write register dump header */
3168 	if (dump && num_dumped_reg_entries > 0)
3169 		ecore_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, split_type_name, split_id, param_name, param_val);
3170 
3171 	return num_dumped_reg_entries > 0 ? offset : 0;
3172 }
3173 
3174 /* Dumps registers according to the input registers array. Returns the dumped
3175  * size in dwords.
3176  */
3177 static u32 ecore_grc_dump_registers(struct ecore_hwfn *p_hwfn,
3178 									struct ecore_ptt *p_ptt,
3179 									u32 *dump_buf,
3180 									bool dump,
3181 									bool block_enable[MAX_BLOCK_ID],
3182 									const char *param_name,
3183 									const char *param_val)
3184 {
3185 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3186 	struct chip_platform_defs *chip_platform;
3187 	u32 offset = 0, input_offset = 0;
3188 	u8 port_id, pf_id, vf_id;
3189 
3190 	chip_platform = &s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id];
3191 
3192 	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
3193 		const struct dbg_dump_split_hdr *split_hdr;
3194 		struct dbg_array curr_input_regs_arr;
3195 		u32 split_data_size;
3196 		u8 split_type_id;
3197 
3198 		split_hdr = (const struct dbg_dump_split_hdr *)&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
3199 		split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3200 		split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3201 		curr_input_regs_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
3202 		curr_input_regs_arr.size_in_dwords = split_data_size;
3203 
3204 		switch(split_type_id) {
3205 		case SPLIT_TYPE_NONE:
3206 			offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "eng", (u32)(-1), param_name, param_val);
3207 			break;
3208 
3209 		case SPLIT_TYPE_PORT:
3210 			for (port_id = 0; port_id < chip_platform->num_ports; port_id++) {
3211 				if (dump)
3212 					ecore_port_pretend(p_hwfn, p_ptt, port_id);
3213 				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "port", port_id, param_name, param_val);
3214 			}
3215 			break;
3216 
3217 		case SPLIT_TYPE_PF:
3218 		case SPLIT_TYPE_PORT_PF:
3219 			for (pf_id = 0; pf_id < chip_platform->num_pfs; pf_id++) {
3220 				if (dump)
3221 					ecore_fid_pretend(p_hwfn, p_ptt, (pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
3222 				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "pf", pf_id, param_name, param_val);
3223 			}
3224 			break;
3225 
3226 		case SPLIT_TYPE_VF:
3227 			for (vf_id = 0; vf_id < chip_platform->num_vfs; vf_id++) {
3228 				if (dump)
3229 					ecore_fid_pretend(p_hwfn, p_ptt, (1 << PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT));
3230 				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "vf", vf_id, param_name, param_val);
3231 			}
3232 			break;
3233 
3234 		default:
3235 			break;
3236 		}
3237 
3238 		input_offset += split_data_size;
3239 	}
3240 
3241 	/* Pretend to original PF */
3242 	if (dump)
3243 		ecore_fid_pretend(p_hwfn, p_ptt, (p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
3244 
3245 	return offset;
3246 }
3247 
3248 /* Dump reset registers. Returns the dumped size in dwords. */
3249 static u32 ecore_grc_dump_reset_regs(struct ecore_hwfn *p_hwfn,
3250 	struct ecore_ptt *p_ptt,
3251 	u32 *dump_buf,
3252 	bool dump)
3253 {
3254 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3255 	u32 i, offset = 0, num_regs = 0;
3256 
3257 	/* Calculate header size */
3258 	offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3259 
3260 	/* Write reset registers */
3261 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
3262 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
3263 			continue;
3264 
3265 		offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(s_reset_regs_defs[i].addr), 1, false);
3266 		num_regs++;
3267 	}
3268 
3269 	/* Write header */
3270 	if (dump)
3271 		ecore_grc_dump_regs_hdr(dump_buf, true, num_regs, "eng", -1, OSAL_NULL, OSAL_NULL);
3272 
3273 	return offset;
3274 }
3275 
3276 /* Dump registers that are modified during GRC Dump and therefore must be
3277  * dumped first. Returns the dumped size in dwords.
3278  */
3279 static u32 ecore_grc_dump_modified_regs(struct ecore_hwfn *p_hwfn,
3280 										struct ecore_ptt *p_ptt,
3281 										u32 *dump_buf,
3282 										bool dump)
3283 {
3284 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3285 	u32 block_id, offset = 0, num_reg_entries = 0;
3286 	const struct dbg_attn_reg *attn_reg_arr;
3287 	u8 storm_id, reg_idx, num_attn_regs;
3288 
3289 	/* Calculate header size */
3290 	offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3291 
3292 	/* Write parity registers */
3293 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3294 		if (dev_data->block_in_reset[block_id] && dump)
3295 			continue;
3296 
3297 		attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
3298 
3299 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
3300 			const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
3301 			u16 modes_buf_offset;
3302 			bool eval_mode;
3303 
3304 			/* Check mode */
3305 			eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3306 			modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3307 			if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
3308 				continue;
3309 
3310 			/* Mode match: read & dump registers */
3311 			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, reg_data->mask_address, 1, false);
3312 			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS), 1, false);
3313 			num_reg_entries += 2;
3314 		}
3315 	}
3316 
3317 	/* Write Storm stall status registers */
3318 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3319 		struct storm_defs *storm = &s_storm_defs[storm_id];
3320 
3321 		if (dev_data->block_in_reset[storm->block_id] && dump)
3322 			continue;
3323 
3324 		offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3325 			BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STALLED), 1, false);
3326 		num_reg_entries++;
3327 	}
3328 
3329 	/* Write header */
3330 	if (dump)
3331 		ecore_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, "eng", -1, OSAL_NULL, OSAL_NULL);
3332 
3333 	return offset;
3334 }
3335 
3336 /* Dumps registers that can't be represented in the debug arrays */
3337 static u32 ecore_grc_dump_special_regs(struct ecore_hwfn *p_hwfn,
3338 									   struct ecore_ptt *p_ptt,
3339 									   u32 *dump_buf,
3340 									   bool dump)
3341 {
3342 	u32 offset = 0;
3343 
3344 	offset += ecore_grc_dump_regs_hdr(dump_buf, dump, 2, "eng", -1, OSAL_NULL, OSAL_NULL);
3345 
3346 	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
3347 	 * skipped).
3348 	 */
3349 	offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO), RDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3350 	offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO), TDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3351 
3352 	return offset;
3353 }
3354 
3355 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3356  * dwords. The following parameters are dumped:
3357  * - name:	   dumped only if it's not OSAL_NULL.
3358  * - addr:	   in dwords, dumped only if name is OSAL_NULL.
3359  * - len:	   in dwords, always dumped.
3360  * - width:	   dumped if it's not zero.
3361  * - packed:	   dumped only if it's not false.
3362  * - mem_group:	   always dumped.
3363  * - is_storm:	   true only if the memory is related to a Storm.
3364  * - storm_letter: valid only if is_storm is true.
3365  *
3366  */
3367 static u32 ecore_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
3368 								  u32 *dump_buf,
3369 								  bool dump,
3370 								  const char *name,
3371 								  u32 addr,
3372 								  u32 len,
3373 								  u32 bit_width,
3374 								  bool packed,
3375 								  const char *mem_group,
3376 								  bool is_storm,
3377 								  char storm_letter)
3378 {
3379 	u8 num_params = 3;
3380 	u32 offset = 0;
3381 	char buf[64];
3382 
3383 	if (!len)
3384 		DP_NOTICE(p_hwfn, true, "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3385 
3386 	if (bit_width)
3387 		num_params++;
3388 	if (packed)
3389 		num_params++;
3390 
3391 	/* Dump section header */
3392 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params);
3393 
3394 	if (name) {
3395 
3396 		/* Dump name */
3397 		if (is_storm) {
3398 			OSAL_STRCPY(buf, "?STORM_");
3399 			buf[0] = storm_letter;
3400 			OSAL_STRCPY(buf + OSAL_STRLEN(buf), name);
3401 		}
3402 		else {
3403 			OSAL_STRCPY(buf, name);
3404 		}
3405 
3406 		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", buf);
3407 	}
3408 	else {
3409 
3410 		/* Dump address */
3411 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3412 
3413 		offset += ecore_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes);
3414 	}
3415 
3416 	/* Dump len */
3417 	offset += ecore_dump_num_param(dump_buf + offset, dump, "len", len);
3418 
3419 	/* Dump bit width */
3420 	if (bit_width)
3421 		offset += ecore_dump_num_param(dump_buf + offset, dump, "width", bit_width);
3422 
3423 	/* Dump packed */
3424 	if (packed)
3425 		offset += ecore_dump_num_param(dump_buf + offset, dump, "packed", 1);
3426 
3427 	/* Dump reg type */
3428 	if (is_storm) {
3429 		OSAL_STRCPY(buf, "?STORM_");
3430 		buf[0] = storm_letter;
3431 		OSAL_STRCPY(buf + OSAL_STRLEN(buf), mem_group);
3432 	}
3433 	else {
3434 		OSAL_STRCPY(buf, mem_group);
3435 	}
3436 
3437 	offset += ecore_dump_str_param(dump_buf + offset, dump, "type", buf);
3438 
3439 	return offset;
3440 }
3441 
3442 /* Dumps a single GRC memory. If name is OSAL_NULL, the memory is stored by address.
3443  * Returns the dumped size in dwords.
3444  * The addr and len arguments are specified in dwords.
3445  */
3446 static u32 ecore_grc_dump_mem(struct ecore_hwfn *p_hwfn,
3447 							  struct ecore_ptt *p_ptt,
3448 							  u32 *dump_buf,
3449 							  bool dump,
3450 							  const char *name,
3451 							  u32 addr,
3452 							  u32 len,
3453 							  bool wide_bus,
3454 							  u32 bit_width,
3455 							  bool packed,
3456 							  const char *mem_group,
3457 							  bool is_storm,
3458 							  char storm_letter)
3459 {
3460 	u32 offset = 0;
3461 
3462 	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, addr, len, bit_width, packed, mem_group, is_storm, storm_letter);
3463 	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3464 
3465 	return offset;
3466 }
3467 
3468 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3469 static u32 ecore_grc_dump_mem_entries(struct ecore_hwfn *p_hwfn,
3470 									  struct ecore_ptt *p_ptt,
3471 									  struct dbg_array input_mems_arr,
3472 									  u32 *dump_buf,
3473 									  bool dump)
3474 {
3475 	u32 i, offset = 0, input_offset = 0;
3476 	bool mode_match = true;
3477 
3478 	while (input_offset < input_mems_arr.size_in_dwords) {
3479 		const struct dbg_dump_cond_hdr *cond_hdr;
3480 		u16 modes_buf_offset;
3481 		u32 num_entries;
3482 		bool eval_mode;
3483 
3484 		cond_hdr = (const struct dbg_dump_cond_hdr *)&input_mems_arr.ptr[input_offset++];
3485 		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3486 
3487 		/* Check required mode */
3488 		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3489 		if (eval_mode) {
3490 			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3491 			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
3492 		}
3493 
3494 		if (!mode_match) {
3495 			input_offset += cond_hdr->data_size;
3496 			continue;
3497 		}
3498 
3499 		for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3500 			const struct dbg_dump_mem *mem = (const struct dbg_dump_mem *)&input_mems_arr.ptr[input_offset];
3501 			u8 mem_group_id = GET_FIELD(mem->dword0, DBG_DUMP_MEM_MEM_GROUP_ID);
3502 			bool is_storm = false, mem_wide_bus;
3503 			char storm_letter = 'a';
3504 			u32 mem_addr, mem_len;
3505 
3506 			if (mem_group_id >= MEM_GROUPS_NUM) {
3507 				DP_NOTICE(p_hwfn, true, "Invalid mem_group_id\n");
3508 				return 0;
3509 			}
3510 
3511 			if (!ecore_grc_is_mem_included(p_hwfn, (enum block_id)cond_hdr->block_id, mem_group_id))
3512 				continue;
3513 
3514 			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3515 			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3516 			mem_wide_bus = GET_FIELD(mem->dword1, DBG_DUMP_MEM_WIDE_BUS);
3517 
3518 			/* Update memory length for CCFC/TCFC memories
3519 			 * according to number of LCIDs/LTIDs.
3520 			 */
3521 			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3522 				if (mem_len % MAX_LCIDS) {
3523 					DP_NOTICE(p_hwfn, true, "Invalid CCFC connection memory size\n");
3524 					return 0;
3525 				}
3526 
3527 				mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS) * (mem_len / MAX_LCIDS);
3528 			}
3529 			else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3530 				if (mem_len % MAX_LTIDS) {
3531 					DP_NOTICE(p_hwfn, true, "Invalid TCFC task memory size\n");
3532 					return 0;
3533 				}
3534 
3535 				mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS) * (mem_len / MAX_LTIDS);
3536 			}
3537 
3538 			/* If memory is associated with Storm, udpate Storm
3539 			 * details.
3540 			 */
3541 			if (s_block_defs[cond_hdr->block_id]->associated_to_storm) {
3542 				is_storm = true;
3543 				storm_letter = s_storm_defs[s_block_defs[cond_hdr->block_id]->storm_id].letter;
3544 			}
3545 
3546 			/* Dump memory */
3547 			offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, mem_addr, mem_len, mem_wide_bus,
3548 				0, false, s_mem_group_names[mem_group_id], is_storm, storm_letter);
3549 		}
3550 	}
3551 
3552 	return offset;
3553 }
3554 
3555 /* Dumps GRC memories according to the input array dump_mem.
3556  * Returns the dumped size in dwords.
3557  */
3558 static u32 ecore_grc_dump_memories(struct ecore_hwfn *p_hwfn,
3559 								   struct ecore_ptt *p_ptt,
3560 								   u32 *dump_buf,
3561 								   bool dump)
3562 {
3563 	u32 offset = 0, input_offset = 0;
3564 
3565 	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3566 		const struct dbg_dump_split_hdr *split_hdr;
3567 		struct dbg_array curr_input_mems_arr;
3568 		u32 split_data_size;
3569 		u8 split_type_id;
3570 
3571 		split_hdr = (const struct dbg_dump_split_hdr *)&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3572 		split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3573 		split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3574 		curr_input_mems_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3575 		curr_input_mems_arr.size_in_dwords = split_data_size;
3576 
3577 		switch (split_type_id) {
3578 		case SPLIT_TYPE_NONE:
3579 			offset += ecore_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump);
3580 			break;
3581 
3582 		default:
3583 			DP_NOTICE(p_hwfn, true, "Dumping split memories is currently not supported\n");
3584 			break;
3585 		}
3586 
3587 		input_offset += split_data_size;
3588 	}
3589 
3590 	return offset;
3591 }
3592 
3593 /* Dumps GRC context data for the specified Storm.
3594  * Returns the dumped size in dwords.
3595  * The lid_size argument is specified in quad-regs.
3596  */
3597 static u32 ecore_grc_dump_ctx_data(struct ecore_hwfn *p_hwfn,
3598 								   struct ecore_ptt *p_ptt,
3599 								   u32 *dump_buf,
3600 								   bool dump,
3601 								   const char *name,
3602 								   u32 num_lids,
3603 								   u32 lid_size,
3604 								   u32 rd_reg_addr,
3605 								   u8 storm_id)
3606 {
3607 	struct storm_defs *storm = &s_storm_defs[storm_id];
3608 	u32 i, lid, total_size, offset = 0;
3609 
3610 	if (!lid_size)
3611 		return 0;
3612 
3613 	lid_size *= BYTES_IN_DWORD;
3614 	total_size = num_lids * lid_size;
3615 
3616 	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, 0, total_size, lid_size * 32, false, name, true, storm->letter);
3617 
3618 	if (!dump)
3619 		return offset + total_size;
3620 
3621 	/* Dump context data */
3622 	for (lid = 0; lid < num_lids; lid++) {
3623 		for (i = 0; i < lid_size; i++, offset++) {
3624 			ecore_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3625 			*(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, rd_reg_addr);
3626 		}
3627 	}
3628 
3629 	return offset;
3630 }
3631 
3632 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3633 static u32 ecore_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
3634 							  struct ecore_ptt *p_ptt,
3635 							  u32 *dump_buf,
3636 							  bool dump)
3637 {
3638 	u32 offset = 0;
3639 	u8 storm_id;
3640 
3641 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3642 		struct storm_defs *storm = &s_storm_defs[storm_id];
3643 
3644 		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3645 			continue;
3646 
3647 		/* Dump Conn AG context size */
3648 		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3649 			storm->cm_conn_ag_ctx_lid_size, storm->cm_conn_ag_ctx_rd_addr, storm_id);
3650 
3651 		/* Dump Conn ST context size */
3652 		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3653 			storm->cm_conn_st_ctx_lid_size, storm->cm_conn_st_ctx_rd_addr, storm_id);
3654 
3655 		/* Dump Task AG context size */
3656 		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3657 			storm->cm_task_ag_ctx_lid_size, storm->cm_task_ag_ctx_rd_addr, storm_id);
3658 
3659 		/* Dump Task ST context size */
3660 		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3661 			storm->cm_task_st_ctx_lid_size, storm->cm_task_st_ctx_rd_addr, storm_id);
3662 	}
3663 
3664 	return offset;
3665 }
3666 
3667 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3668 static u32 ecore_grc_dump_iors(struct ecore_hwfn *p_hwfn,
3669 							   struct ecore_ptt *p_ptt,
3670 							   u32 *dump_buf,
3671 							   bool dump)
3672 {
3673 	char buf[10] = "IOR_SET_?";
3674 	u32 addr, offset = 0;
3675 	u8 storm_id, set_id;
3676 
3677 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3678 		struct storm_defs *storm = &s_storm_defs[storm_id];
3679 
3680 		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3681 			continue;
3682 
3683 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3684 			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id);
3685 			buf[OSAL_STRLEN(buf) - 1] = '0' + set_id;
3686 			offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, buf, addr, IORS_PER_SET, false, 32, false, "ior", true, storm->letter);
3687 		}
3688 	}
3689 
3690 	return offset;
3691 }
3692 
3693 /* Dump VFC CAM. Returns the dumped size in dwords. */
3694 static u32 ecore_grc_dump_vfc_cam(struct ecore_hwfn *p_hwfn,
3695 								  struct ecore_ptt *p_ptt,
3696 								  u32 *dump_buf,
3697 								  bool dump,
3698 								  u8 storm_id)
3699 {
3700 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3701 	struct storm_defs *storm = &s_storm_defs[storm_id];
3702 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3703 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3704 	u32 row, i, offset = 0;
3705 
3706 	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, "vfc_cam", 0, total_size, 256, false, "vfc_cam", true, storm->letter);
3707 
3708 	if (!dump)
3709 		return offset + total_size;
3710 
3711 	/* Prepare CAM address */
3712 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3713 
3714 	for (row = 0; row < VFC_CAM_NUM_ROWS; row++, offset += VFC_CAM_RESP_DWORDS) {
3715 
3716 		/* Write VFC CAM command */
3717 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3718 		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, cam_cmd, VFC_CAM_CMD_DWORDS);
3719 
3720 		/* Write VFC CAM address */
3721 		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, cam_addr, VFC_CAM_ADDR_DWORDS);
3722 
3723 		/* Read VFC CAM read response */
3724 		ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_CAM_RESP_DWORDS);
3725 	}
3726 
3727 	return offset;
3728 }
3729 
3730 /* Dump VFC RAM. Returns the dumped size in dwords. */
3731 static u32 ecore_grc_dump_vfc_ram(struct ecore_hwfn *p_hwfn,
3732 								  struct ecore_ptt *p_ptt,
3733 								  u32 *dump_buf,
3734 								  bool dump,
3735 								  u8 storm_id,
3736 								  struct vfc_ram_defs *ram_defs)
3737 {
3738 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3739 	struct storm_defs *storm = &s_storm_defs[storm_id];
3740 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3741 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3742 	u32 row, i, offset = 0;
3743 
3744 	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, ram_defs->mem_name, 0, total_size, 256, false, ram_defs->type_name, true, storm->letter);
3745 
3746 	/* Prepare RAM address */
3747 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3748 
3749 	if (!dump)
3750 		return offset + total_size;
3751 
3752 	for (row = ram_defs->base_row; row < ram_defs->base_row + ram_defs->num_rows; row++, offset += VFC_RAM_RESP_DWORDS) {
3753 
3754 		/* Write VFC RAM command */
3755 		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS);
3756 
3757 		/* Write VFC RAM address */
3758 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3759 		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, ram_addr, VFC_RAM_ADDR_DWORDS);
3760 
3761 		/* Read VFC RAM read response */
3762 		ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_RAM_RESP_DWORDS);
3763 	}
3764 
3765 	return offset;
3766 }
3767 
3768 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3769 static u32 ecore_grc_dump_vfc(struct ecore_hwfn *p_hwfn,
3770 							  struct ecore_ptt *p_ptt,
3771 							  u32 *dump_buf,
3772 							  bool dump)
3773 {
3774 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3775 	u8 storm_id, i;
3776 	u32 offset = 0;
3777 
3778 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3779 		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id) ||
3780 			!s_storm_defs[storm_id].has_vfc ||
3781 			(storm_id == DBG_PSTORM_ID && dev_data->platform_id != PLATFORM_ASIC))
3782 			continue;
3783 
3784 		/* Read CAM */
3785 		offset += ecore_grc_dump_vfc_cam(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id);
3786 
3787 		/* Read RAM */
3788 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3789 			offset += ecore_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id, &s_vfc_ram_defs[i]);
3790 	}
3791 
3792 	return offset;
3793 }
3794 
3795 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3796 static u32 ecore_grc_dump_rss(struct ecore_hwfn *p_hwfn,
3797 							  struct ecore_ptt *p_ptt,
3798 							  u32 *dump_buf,
3799 							  bool dump)
3800 {
3801 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3802 	u32 offset = 0;
3803 	u8 rss_mem_id;
3804 
3805 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3806 		u32 rss_addr, num_entries, total_dwords;
3807 		struct rss_mem_defs *rss_defs;
3808 		bool packed;
3809 
3810 		rss_defs = &s_rss_mem_defs[rss_mem_id];
3811 		rss_addr = rss_defs->addr;
3812 		num_entries = rss_defs->num_entries[dev_data->chip_id];
3813 		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3814 		packed = (rss_defs->entry_width == 16);
3815 
3816 		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, rss_defs->mem_name, 0, total_dwords,
3817 			rss_defs->entry_width, packed, rss_defs->type_name, false, 0);
3818 
3819 		/* Dump RSS data */
3820 		if (!dump) {
3821 			offset += total_dwords;
3822 			continue;
3823 		}
3824 
3825 		while (total_dwords) {
3826 			u32 num_dwords_to_read = OSAL_MIN_T(u32, RSS_REG_RSS_RAM_DATA_SIZE, total_dwords);
3827 			ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3828 			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA), num_dwords_to_read, false);
3829 			total_dwords -= num_dwords_to_read;
3830 			rss_addr++;
3831 		}
3832 	}
3833 
3834 	return offset;
3835 }
3836 
3837 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3838 static u32 ecore_grc_dump_big_ram(struct ecore_hwfn *p_hwfn,
3839 								  struct ecore_ptt *p_ptt,
3840 								  u32 *dump_buf,
3841 								  bool dump,
3842 								  u8 big_ram_id)
3843 {
3844 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3845 	u32 block_size, ram_size, offset = 0, reg_val, i;
3846 	char mem_name[12] = "???_BIG_RAM";
3847 	char type_name[8] = "???_RAM";
3848 	struct big_ram_defs *big_ram;
3849 
3850 	big_ram = &s_big_ram_defs[big_ram_id];
3851 	ram_size = big_ram->ram_size[dev_data->chip_id];
3852 
3853 	reg_val = ecore_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3854 	block_size = reg_val & (1 << big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256 : 128;
3855 
3856 	OSAL_STRNCPY(type_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3857 	OSAL_STRNCPY(mem_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3858 
3859 	/* Dump memory header */
3860 	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, ram_size, block_size * 8, false, type_name, false, 0);
3861 
3862 	/* Read and dump Big RAM data */
3863 	if (!dump)
3864 		return offset + ram_size;
3865 
3866 	/* Dump Big RAM */
3867 	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE); i++) {
3868 		ecore_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3869 		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(big_ram->data_reg_addr), BRB_REG_BIG_RAM_DATA_SIZE, false);
3870 	}
3871 
3872 	return offset;
3873 }
3874 
3875 static u32 ecore_grc_dump_mcp(struct ecore_hwfn *p_hwfn,
3876 							  struct ecore_ptt *p_ptt,
3877 							  u32 *dump_buf,
3878 							  bool dump)
3879 {
3880 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3881 	bool block_enable[MAX_BLOCK_ID] = { 0 };
3882 	bool halted = false;
3883 	u32 offset = 0;
3884 
3885 	/* Halt MCP */
3886 	if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3887 		halted = !ecore_mcp_halt(p_hwfn, p_ptt);
3888 		if (!halted)
3889 			DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
3890 	}
3891 
3892 	/* Dump MCP scratchpad */
3893 	offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3894 		ECORE_IS_E5(p_hwfn->p_dev) ? MCP_REG_SCRATCH_SIZE_E5 : MCP_REG_SCRATCH_SIZE_BB_K2, false, 0, false, "MCP", false, 0);
3895 
3896 	/* Dump MCP cpu_reg_file */
3897 	offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3898 		MCP_REG_CPU_REG_FILE_SIZE, false, 0, false, "MCP", false, 0);
3899 
3900 	/* Dump MCP registers */
3901 	block_enable[BLOCK_MCP] = true;
3902 	offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, "block", "MCP");
3903 
3904 	/* Dump required non-MCP registers */
3905 	offset += ecore_grc_dump_regs_hdr(dump_buf + offset, dump, 1, "eng", -1, "block", "MCP");
3906 	offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR), 1, false);
3907 
3908 	/* Release MCP */
3909 	if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
3910 		DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
3911 
3912 	return offset;
3913 }
3914 
3915 /* Dumps the tbus indirect memory for all PHYs. */
3916 static u32 ecore_grc_dump_phy(struct ecore_hwfn *p_hwfn,
3917 							  struct ecore_ptt *p_ptt,
3918 							  u32 *dump_buf,
3919 							  bool dump)
3920 {
3921 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3922 	char mem_name[32];
3923 	u8 phy_id;
3924 
3925 	for (phy_id = 0; phy_id < OSAL_ARRAY_SIZE(s_phy_defs); phy_id++) {
3926 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3927 		struct phy_defs *phy_defs;
3928 		u8 *bytes_buf;
3929 
3930 		phy_defs = &s_phy_defs[phy_id];
3931 		addr_lo_addr = phy_defs->base_addr + phy_defs->tbus_addr_lo_addr;
3932 		addr_hi_addr = phy_defs->base_addr + phy_defs->tbus_addr_hi_addr;
3933 		data_lo_addr = phy_defs->base_addr + phy_defs->tbus_data_lo_addr;
3934 		data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr;
3935 
3936 		if (OSAL_SNPRINTF(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0)
3937 			DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid PHY memory name\n");
3938 
3939 		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, false, 0);
3940 
3941 		if (!dump) {
3942 			offset += PHY_DUMP_SIZE_DWORDS;
3943 			continue;
3944 		}
3945 
3946 		bytes_buf = (u8 *)(dump_buf + offset);
3947 		for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) {
3948 			ecore_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3949 			for (tbus_lo_offset = 0; tbus_lo_offset < 256; tbus_lo_offset++) {
3950 				ecore_wr(p_hwfn, p_ptt, addr_lo_addr, tbus_lo_offset);
3951 				*(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_lo_addr);
3952 				*(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_hi_addr);
3953 			}
3954 		}
3955 
3956 		offset += PHY_DUMP_SIZE_DWORDS;
3957 	}
3958 
3959 	return offset;
3960 }
3961 
3962 static void ecore_config_dbg_line(struct ecore_hwfn *p_hwfn,
3963 								  struct ecore_ptt *p_ptt,
3964 								  enum block_id block_id,
3965 								  u8 line_id,
3966 								  u8 enable_mask,
3967 								  u8 right_shift,
3968 								  u8 force_valid_mask,
3969 								  u8 force_frame_mask)
3970 {
3971 	struct block_defs *block = s_block_defs[block_id];
3972 
3973 	ecore_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3974 	ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3975 	ecore_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3976 	ecore_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3977 	ecore_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3978 }
3979 
3980 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3981 static u32 ecore_grc_dump_static_debug(struct ecore_hwfn *p_hwfn,
3982 									   struct ecore_ptt *p_ptt,
3983 									   u32 *dump_buf,
3984 									   bool dump)
3985 {
3986 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3987 	u32 block_id, line_id, offset = 0;
3988 
3989 	/* don't dump static debug if a debug bus recording is in progress */
3990 	if (dump && ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3991 		return 0;
3992 
3993 	if (dump) {
3994 		/* Disable all blocks debug output */
3995 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3996 			struct block_defs *block = s_block_defs[block_id];
3997 
3998 			if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS)
3999 				ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
4000 		}
4001 
4002 		ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
4003 		ecore_bus_set_framing_mode(p_hwfn, p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
4004 		ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
4005 		ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
4006 		ecore_bus_enable_dbg_block(p_hwfn, p_ptt, true);
4007 	}
4008 
4009 	/* Dump all static debug lines for each relevant block */
4010 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
4011 		struct block_defs *block = s_block_defs[block_id];
4012 		struct dbg_bus_block *block_desc;
4013 		u32 block_dwords;
4014 
4015 		if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS)
4016 			continue;
4017 
4018 		block_desc = get_dbg_bus_block_desc(p_hwfn, (enum block_id)block_id);
4019 		block_dwords = NUM_DBG_LINES(block_desc) * STATIC_DEBUG_LINE_DWORDS;
4020 
4021 		/* Dump static section params */
4022 		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, block->name, 0, block_dwords, 32, false, "STATIC", false, 0);
4023 
4024 		if (!dump) {
4025 			offset += block_dwords;
4026 			continue;
4027 		}
4028 
4029 		/* If all lines are invalid - dump zeros */
4030 		if (dev_data->block_in_reset[block_id]) {
4031 			OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(block_dwords));
4032 			offset += block_dwords;
4033 			continue;
4034 		}
4035 
4036 		/* Enable block's client */
4037 		ecore_bus_enable_clients(p_hwfn, p_ptt, 1 << block->dbg_client_id[dev_data->chip_id]);
4038 		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); line_id++) {
4039 
4040 			/* Configure debug line ID */
4041 			ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, (u8)line_id, 0xf, 0, 0, 0);
4042 
4043 			/* Read debug line info */
4044 			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA), STATIC_DEBUG_LINE_DWORDS, true);
4045 		}
4046 
4047 		/* Disable block's client and debug output */
4048 		ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
4049 		ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
4050 	}
4051 
4052 	if (dump) {
4053 		ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
4054 		ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
4055 	}
4056 
4057 	return offset;
4058 }
4059 
4060 /* Performs GRC Dump to the specified buffer.
4061  * Returns the dumped size in dwords.
4062  */
4063 static enum dbg_status ecore_grc_dump(struct ecore_hwfn *p_hwfn,
4064 									  struct ecore_ptt *p_ptt,
4065 									  u32 *dump_buf,
4066 									  bool dump,
4067 									  u32 *num_dumped_dwords)
4068 {
4069 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4070 	bool is_asic, parities_masked = false;
4071 	u8 i, port_mode = 0;
4072 	u32 offset = 0;
4073 
4074 	is_asic = dev_data->platform_id == PLATFORM_ASIC;
4075 
4076 	*num_dumped_dwords = 0;
4077 
4078 	if (dump) {
4079 
4080 		/* Find port mode */
4081 		switch (ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
4082 		case 0: port_mode = 1; break;
4083 		case 1: port_mode = 2; break;
4084 		case 2: port_mode = 4; break;
4085 		}
4086 
4087 		/* Update reset state */
4088 		ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4089 	}
4090 
4091 	/* Dump global params */
4092 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 4);
4093 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "grc-dump");
4094 	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-lcids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS));
4095 	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ltids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS));
4096 	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ports", port_mode);
4097 
4098 	/* Dump reset registers (dumped before taking blocks out of reset ) */
4099 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4100 		offset += ecore_grc_dump_reset_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4101 
4102 	/* Take all blocks out of reset (using reset registers) */
4103 	if (dump) {
4104 		ecore_grc_unreset_blocks(p_hwfn, p_ptt);
4105 		ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4106 	}
4107 
4108 	/* Disable all parities using MFW command */
4109 	if (dump && is_asic && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4110 			parities_masked = !ecore_mcp_mask_parities(p_hwfn, p_ptt, 1);
4111 			if (!parities_masked) {
4112 				DP_NOTICE(p_hwfn, false, "Failed to mask parities using MFW\n");
4113 				if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4114 					return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4115 			}
4116 		}
4117 
4118 	/* Dump modified registers (dumped before modifying them) */
4119 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4120 		offset += ecore_grc_dump_modified_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4121 
4122 	/* Stall storms */
4123 	if (dump && (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4124 		ecore_grc_stall_storms(p_hwfn, p_ptt, true);
4125 
4126 	/* Dump all regs  */
4127 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4128 		bool block_enable[MAX_BLOCK_ID];
4129 
4130 		/* Dump all blocks except MCP */
4131 		for (i = 0; i < MAX_BLOCK_ID; i++)
4132 			block_enable[i] = true;
4133 		block_enable[BLOCK_MCP] = false;
4134 		offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, OSAL_NULL, OSAL_NULL);
4135 
4136 		/* Dump special registers */
4137 		offset += ecore_grc_dump_special_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4138 	}
4139 
4140 	/* Dump memories */
4141 	offset += ecore_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4142 
4143 	/* Dump MCP */
4144 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4145 		offset += ecore_grc_dump_mcp(p_hwfn, p_ptt, dump_buf + offset, dump);
4146 
4147 	/* Dump context */
4148 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4149 		offset += ecore_grc_dump_ctx(p_hwfn, p_ptt, dump_buf + offset, dump);
4150 
4151 	/* Dump RSS memories */
4152 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4153 		offset += ecore_grc_dump_rss(p_hwfn, p_ptt, dump_buf + offset, dump);
4154 
4155 	/* Dump Big RAM */
4156 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4157 		if (ecore_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4158 			offset += ecore_grc_dump_big_ram(p_hwfn, p_ptt, dump_buf + offset, dump, i);
4159 
4160 	/* Dump IORs */
4161 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4162 		offset += ecore_grc_dump_iors(p_hwfn, p_ptt, dump_buf + offset, dump);
4163 
4164 	/* Dump VFC */
4165 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4166 		offset += ecore_grc_dump_vfc(p_hwfn, p_ptt, dump_buf + offset, dump);
4167 
4168 	/* Dump PHY tbus */
4169 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4170 		offset += ecore_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump);
4171 
4172 	/* Dump static debug data  */
4173 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && dev_data->bus.state == DBG_BUS_STATE_IDLE)
4174 		offset += ecore_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump);
4175 
4176 	/* Dump last section */
4177 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4178 
4179 	if (dump) {
4180 
4181 		/* Unstall storms */
4182 		if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4183 			ecore_grc_stall_storms(p_hwfn, p_ptt, false);
4184 
4185 		/* Clear parity status */
4186 		if (is_asic)
4187 			ecore_grc_clear_all_prty(p_hwfn, p_ptt);
4188 
4189 		/* Enable all parities using MFW command */
4190 		if (parities_masked)
4191 			ecore_mcp_mask_parities(p_hwfn, p_ptt, 0);
4192 	}
4193 
4194 	*num_dumped_dwords = offset;
4195 
4196 	return DBG_STATUS_OK;
4197 }
4198 
4199 /* Writes the specified failing Idle Check rule to the specified buffer.
4200  * Returns the dumped size in dwords.
4201  */
4202 static u32 ecore_idle_chk_dump_failure(struct ecore_hwfn *p_hwfn,
4203 									   struct ecore_ptt *p_ptt,
4204 									   u32 *dump_buf,
4205 									   bool dump,
4206 									   u16 rule_id,
4207 									   const struct dbg_idle_chk_rule *rule,
4208 									   u16 fail_entry_id,
4209 									   u32 *cond_reg_values)
4210 {
4211 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4212 	const struct dbg_idle_chk_cond_reg *cond_regs;
4213 	const struct dbg_idle_chk_info_reg *info_regs;
4214 	u32 i, next_reg_offset = 0, offset = 0;
4215 	struct dbg_idle_chk_result_hdr *hdr;
4216 	const union dbg_idle_chk_reg *regs;
4217 	u8 reg_id;
4218 
4219 	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4220 	regs = &((const union dbg_idle_chk_reg *)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4221 	cond_regs = &regs[0].cond_reg;
4222 	info_regs = &regs[rule->num_cond_regs].info_reg;
4223 
4224 	/* Dump rule data */
4225 	if (dump) {
4226 		OSAL_MEMSET(hdr, 0, sizeof(*hdr));
4227 		hdr->rule_id = rule_id;
4228 		hdr->mem_entry_id = fail_entry_id;
4229 		hdr->severity = rule->severity;
4230 		hdr->num_dumped_cond_regs = rule->num_cond_regs;
4231 	}
4232 
4233 	offset += IDLE_CHK_RESULT_HDR_DWORDS;
4234 
4235 	/* Dump condition register values */
4236 	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4237 		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4238 		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4239 
4240 		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
4241 
4242 		/* Write register header */
4243 		if (!dump) {
4244 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size;
4245 			continue;
4246 		}
4247 
4248 		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4249 		OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
4250 		reg_hdr->start_entry = reg->start_entry;
4251 		reg_hdr->size = reg->entry_size;
4252 		SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4253 		SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4254 
4255 		/* Write register values */
4256 		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4257 			dump_buf[offset] = cond_reg_values[next_reg_offset];
4258 	}
4259 
4260 	/* Dump info register values */
4261 	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4262 		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4263 		u32 block_id;
4264 
4265 		/* Check if register's block is in reset */
4266 		if (!dump) {
4267 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4268 			continue;
4269 		}
4270 
4271 		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4272 		if (block_id >= MAX_BLOCK_ID) {
4273 			DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
4274 			return 0;
4275 		}
4276 
4277 		if (!dev_data->block_in_reset[block_id]) {
4278 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4279 			bool wide_bus, eval_mode, mode_match = true;
4280 			u16 modes_buf_offset;
4281 			u32 addr;
4282 
4283 			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
4284 
4285 			/* Check mode */
4286 			eval_mode = GET_FIELD(reg->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
4287 			if (eval_mode) {
4288 				modes_buf_offset = GET_FIELD(reg->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
4289 				mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
4290 			}
4291 
4292 			if (!mode_match)
4293 				continue;
4294 
4295 			addr = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_ADDRESS);
4296 			wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4297 
4298 			/* Write register header */
4299 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4300 			hdr->num_dumped_info_regs++;
4301 			OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
4302 			reg_hdr->size = reg->size;
4303 			SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, rule->num_cond_regs + reg_id);
4304 
4305 			/* Write register values */
4306 			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, reg->size, wide_bus);
4307 		}
4308 	}
4309 
4310 	return offset;
4311 }
4312 
4313 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4314 static u32 ecore_idle_chk_dump_rule_entries(struct ecore_hwfn *p_hwfn,
4315 											struct ecore_ptt *p_ptt,
4316 											u32 *dump_buf,
4317 											bool dump,
4318 											const struct dbg_idle_chk_rule *input_rules,
4319 											u32 num_input_rules,
4320 											u32 *num_failing_rules)
4321 {
4322 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4323 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4324 	u32 i, offset = 0;
4325 	u16 entry_id;
4326 	u8 reg_id;
4327 
4328 	*num_failing_rules = 0;
4329 
4330 	for (i = 0; i < num_input_rules; i++) {
4331 		const struct dbg_idle_chk_cond_reg *cond_regs;
4332 		const struct dbg_idle_chk_rule *rule;
4333 		const union dbg_idle_chk_reg *regs;
4334 		u16 num_reg_entries = 1;
4335 		bool check_rule = true;
4336 		const u32 *imm_values;
4337 
4338 		rule = &input_rules[i];
4339 		regs = &((const union dbg_idle_chk_reg *)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4340 		cond_regs = &regs[0].cond_reg;
4341 		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr[rule->imm_offset];
4342 
4343 		/* Check if all condition register blocks are out of reset, and
4344 		 * find maximal number of entries (all condition registers that
4345 		 * are memories must have the same size, which is > 1).
4346 		 */
4347 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) {
4348 			u32 block_id = GET_FIELD(cond_regs[reg_id].data, DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4349 
4350 			if (block_id >= MAX_BLOCK_ID) {
4351 				DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
4352 				return 0;
4353 			}
4354 
4355 			check_rule = !dev_data->block_in_reset[block_id];
4356 			if (cond_regs[reg_id].num_entries > num_reg_entries)
4357 				num_reg_entries = cond_regs[reg_id].num_entries;
4358 		}
4359 
4360 		if (!check_rule && dump)
4361 			continue;
4362 
4363 		if (!dump) {
4364 			u32 entry_dump_size = ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, false, rule->rule_id, rule, 0, OSAL_NULL);
4365 
4366 			offset += num_reg_entries * entry_dump_size;
4367 			(*num_failing_rules) += num_reg_entries;
4368 			continue;
4369 		}
4370 
4371 		/* Go over all register entries (number of entries is the same for all
4372 		 * condition registers).
4373 		 */
4374 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4375 			u32 next_reg_offset = 0;
4376 
4377 			/* Read current entry of all condition registers */
4378 			for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4379 				const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4380 				u32 padded_entry_size, addr;
4381 				bool wide_bus;
4382 
4383 				/* Find GRC address (if it's a memory, the address of the
4384 				 * specific entry is calculated).
4385 				 */
4386 				addr = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_ADDRESS);
4387 				wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4388 				if (reg->num_entries > 1 || reg->start_entry > 0) {
4389 					padded_entry_size = reg->entry_size > 1 ? OSAL_ROUNDUP_POW_OF_TWO(reg->entry_size) : 1;
4390 					addr += (reg->start_entry + entry_id) * padded_entry_size;
4391 				}
4392 
4393 				/* Read registers */
4394 				if (next_reg_offset + reg->entry_size >= IDLE_CHK_MAX_ENTRIES_SIZE) {
4395 					DP_NOTICE(p_hwfn, true, "idle check registers entry is too large\n");
4396 					return 0;
4397 				}
4398 
4399 				next_reg_offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, reg->entry_size, wide_bus);
4400 			}
4401 
4402 			/* Call rule condition function. if returns true, it's a failure.*/
4403 			if ((*cond_arr[rule->cond_id])(cond_reg_values, imm_values)) {
4404 				offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, dump, rule->rule_id, rule, entry_id, cond_reg_values);
4405 				(*num_failing_rules)++;
4406 			}
4407 		}
4408 	}
4409 
4410 	return offset;
4411 }
4412 
4413 /* Performs Idle Check Dump to the specified buffer.
4414  * Returns the dumped size in dwords.
4415  */
4416 static u32 ecore_idle_chk_dump(struct ecore_hwfn *p_hwfn,
4417 							   struct ecore_ptt *p_ptt,
4418 							   u32 *dump_buf,
4419 							   bool dump)
4420 {
4421 	u32 num_failing_rules_offset, offset = 0, input_offset = 0, num_failing_rules = 0;
4422 
4423 	/* Dump global params */
4424 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4425 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "idle-chk");
4426 
4427 	/* Dump idle check section header with a single parameter */
4428 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4429 	num_failing_rules_offset = offset;
4430 	offset += ecore_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4431 
4432 	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4433 		const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr *)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset++];
4434 		bool eval_mode, mode_match = true;
4435 		u32 curr_failing_rules;
4436 		u16 modes_buf_offset;
4437 
4438 		/* Check mode */
4439 		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
4440 		if (eval_mode) {
4441 			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
4442 			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
4443 		}
4444 
4445 		if (mode_match) {
4446 			offset += ecore_idle_chk_dump_rule_entries(p_hwfn, p_ptt, dump_buf + offset, dump, (const struct dbg_idle_chk_rule *)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset], cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS, &curr_failing_rules);
4447 			num_failing_rules += curr_failing_rules;
4448 		}
4449 
4450 		input_offset += cond_hdr->data_size;
4451 	}
4452 
4453 	/* Overwrite num_rules parameter */
4454 	if (dump)
4455 		ecore_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules);
4456 
4457 	/* Dump last section */
4458 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4459 
4460 	return offset;
4461 }
4462 
4463 /* Finds the meta data image in NVRAM */
4464 static enum dbg_status ecore_find_nvram_image(struct ecore_hwfn *p_hwfn,
4465 											  struct ecore_ptt *p_ptt,
4466 											  u32 image_type,
4467 											  u32 *nvram_offset_bytes,
4468 											  u32 *nvram_size_bytes)
4469 {
4470 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4471 	struct mcp_file_att file_att;
4472 	int nvm_result;
4473 
4474 	/* Call NVRAM get file command */
4475 	nvm_result = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, image_type, &ret_mcp_resp, &ret_mcp_param, &ret_txn_size, (u32 *)&file_att);
4476 
4477 	/* Check response */
4478 	if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4479 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4480 
4481 	/* Update return values */
4482 	*nvram_offset_bytes = file_att.nvm_start_addr;
4483 	*nvram_size_bytes = file_att.len;
4484 
4485 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", image_type, *nvram_offset_bytes, *nvram_size_bytes);
4486 
4487 	/* Check alignment */
4488 	if (*nvram_size_bytes & 0x3)
4489 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4490 
4491 	return DBG_STATUS_OK;
4492 }
4493 
4494 /* Reads data from NVRAM */
4495 static enum dbg_status ecore_nvram_read(struct ecore_hwfn *p_hwfn,
4496 										struct ecore_ptt *p_ptt,
4497 										u32 nvram_offset_bytes,
4498 										u32 nvram_size_bytes,
4499 										u32 *ret_buf)
4500 {
4501 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4502 	s32 bytes_left = nvram_size_bytes;
4503 	u32 read_offset = 0;
4504 
4505 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes);
4506 
4507 	do {
4508 		bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4509 
4510 		/* Call NVRAM read command */
4511 		if (ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, (nvram_offset_bytes + read_offset) | (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_OFFSET), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32 *)((u8 *)ret_buf + read_offset)))
4512 			return DBG_STATUS_NVRAM_READ_FAILED;
4513 
4514 		/* Check response */
4515 		if ((ret_mcp_resp  & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4516 			return DBG_STATUS_NVRAM_READ_FAILED;
4517 
4518 		/* Update read offset */
4519 		read_offset += ret_read_size;
4520 		bytes_left -= ret_read_size;
4521 	} while (bytes_left > 0);
4522 
4523 	return DBG_STATUS_OK;
4524 }
4525 
4526 /* Get info on the MCP Trace data in the scratchpad:
4527  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4528  * - trace_data_size (OUT): trace data size in bytes (without the header)
4529  */
4530 static enum dbg_status ecore_mcp_trace_get_data_info(struct ecore_hwfn *p_hwfn,
4531 													 struct ecore_ptt *p_ptt,
4532 													 u32 *trace_data_grc_addr,
4533 													 u32 *trace_data_size)
4534 {
4535 	u32 spad_trace_offsize, signature;
4536 
4537 	/* Read trace section offsize structure from MCP scratchpad */
4538 	spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4539 
4540 	/* Extract trace section address from offsize (in scratchpad) */
4541 	*trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4542 
4543 	/* Read signature from MCP trace section */
4544 	signature = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, signature));
4545 
4546 	if (signature != MFW_TRACE_SIGNATURE)
4547 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4548 
4549 	/* Read trace size from MCP trace section */
4550 	*trace_data_size = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, size));
4551 
4552 	return DBG_STATUS_OK;
4553 }
4554 
4555 /* Reads MCP trace meta data image from NVRAM
4556  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4557  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4558  *			      loaded from file).
4559  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4560  */
4561 static enum dbg_status ecore_mcp_trace_get_meta_info(struct ecore_hwfn *p_hwfn,
4562 													 struct ecore_ptt *p_ptt,
4563 													 u32 trace_data_size_bytes,
4564 													 u32 *running_bundle_id,
4565 													 u32 *trace_meta_offset,
4566 													 u32 *trace_meta_size)
4567 {
4568 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4569 
4570 	/* Read MCP trace section offsize structure from MCP scratchpad */
4571 	spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4572 
4573 	/* Find running bundle ID */
4574 	running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4575 	*running_bundle_id = ecore_rd(p_hwfn, p_ptt, running_mfw_addr);
4576 	if (*running_bundle_id > 1)
4577 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4578 
4579 	/* Find image in NVRAM */
4580 	nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4581 	return ecore_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, trace_meta_offset, trace_meta_size);
4582 }
4583 
4584 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4585 static enum dbg_status ecore_mcp_trace_read_meta(struct ecore_hwfn *p_hwfn,
4586 												 struct ecore_ptt *p_ptt,
4587 												 u32 nvram_offset_in_bytes,
4588 												 u32 size_in_bytes,
4589 												 u32 *buf)
4590 {
4591 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4592 	enum dbg_status status;
4593 	u32 signature;
4594 
4595 	/* Read meta data from NVRAM */
4596 	status = ecore_nvram_read(p_hwfn, p_ptt, nvram_offset_in_bytes, size_in_bytes, buf);
4597 	if (status != DBG_STATUS_OK)
4598 		return status;
4599 
4600 	/* Extract and check first signature */
4601 	signature = ecore_read_unaligned_dword(byte_buf);
4602 	byte_buf += sizeof(signature);
4603 	if (signature != NVM_MAGIC_VALUE)
4604 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4605 
4606 	/* Extract number of modules */
4607 	modules_num = *(byte_buf++);
4608 
4609 	/* Skip all modules */
4610 	for (i = 0; i < modules_num; i++) {
4611 		module_len = *(byte_buf++);
4612 		byte_buf += module_len;
4613 	}
4614 
4615 	/* Extract and check second signature */
4616 	signature = ecore_read_unaligned_dword(byte_buf);
4617 	byte_buf += sizeof(signature);
4618 	if (signature != NVM_MAGIC_VALUE)
4619 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4620 
4621 	return DBG_STATUS_OK;
4622 }
4623 
4624 /* Dump MCP Trace */
4625 static enum dbg_status ecore_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
4626 											struct ecore_ptt *p_ptt,
4627 											u32 *dump_buf,
4628 											bool dump,
4629 											u32 *num_dumped_dwords)
4630 {
4631 	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0, trace_meta_size_dwords = 0;
4632 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4633 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4634 	u32 running_bundle_id, offset = 0;
4635 	enum dbg_status status;
4636 	bool mcp_access;
4637 	int halted = 0;
4638 
4639 	*num_dumped_dwords = 0;
4640 
4641 	mcp_access = dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4642 
4643 	/* Get trace data info */
4644 	status = ecore_mcp_trace_get_data_info(p_hwfn, p_ptt, &trace_data_grc_addr, &trace_data_size_bytes);
4645 	if (status != DBG_STATUS_OK)
4646 		return status;
4647 
4648 	/* Dump global params */
4649 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4650 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "mcp-trace");
4651 
4652 	/* Halt MCP while reading from scratchpad so the read data will be
4653 	 * consistent. if halt fails, MCP trace is taken anyway, with a small
4654 	 * risk that it may be corrupt.
4655 	 */
4656 	if (dump && mcp_access) {
4657 		halted = !ecore_mcp_halt(p_hwfn, p_ptt);
4658 		if (!halted)
4659 			DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
4660 	}
4661 
4662 	/* Find trace data size */
4663 	trace_data_size_dwords = DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), BYTES_IN_DWORD);
4664 
4665 	/* Dump trace data section header and param */
4666 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_data", 1);
4667 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_data_size_dwords);
4668 
4669 	/* Read trace data from scratchpad into dump buffer */
4670 	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), trace_data_size_dwords, false);
4671 
4672 	/* Resume MCP (only if halt succeeded) */
4673 	if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
4674 		DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
4675 
4676 	/* Dump trace meta section header */
4677 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1);
4678 
4679 	/* Read trace meta only if NVRAM access is enabled
4680 	 * (trace_meta_size_bytes is dword-aligned).
4681 	 */
4682 	if (OSAL_NVM_IS_ACCESS_ENABLED(p_hwfn) && mcp_access) {
4683 		status = ecore_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, &running_bundle_id, &trace_meta_offset_bytes, &trace_meta_size_bytes);
4684 		if (status == DBG_STATUS_OK)
4685 			trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4686 	}
4687 
4688 	/* Dump trace meta size param */
4689 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_meta_size_dwords);
4690 
4691 	/* Read trace meta image into dump buffer */
4692 	if (dump && trace_meta_size_dwords)
4693 		status = ecore_mcp_trace_read_meta(p_hwfn, p_ptt, trace_meta_offset_bytes, trace_meta_size_bytes, dump_buf + offset);
4694 	if (status == DBG_STATUS_OK)
4695 		offset += trace_meta_size_dwords;
4696 
4697 	/* Dump last section */
4698 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4699 
4700 	*num_dumped_dwords = offset;
4701 
4702 	/* If no mcp access, indicate that the dump doesn't contain the meta
4703 	 * data from NVRAM.
4704 	 */
4705 	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4706 }
4707 
4708 /* Dump GRC FIFO */
4709 static enum dbg_status ecore_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
4710 										   struct ecore_ptt *p_ptt,
4711 										   u32 *dump_buf,
4712 										   bool dump,
4713 										   u32 *num_dumped_dwords)
4714 {
4715 	u32 dwords_read, size_param_offset, offset = 0;
4716 	bool fifo_has_data;
4717 
4718 	*num_dumped_dwords = 0;
4719 
4720 	/* Dump global params */
4721 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4722 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo");
4723 
4724 	/* Dump fifo data section header and param. The size param is 0 for
4725 	 * now, and is overwritten after reading the FIFO.
4726 	 */
4727 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1);
4728 	size_param_offset = offset;
4729 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4730 
4731 	if (dump) {
4732 		fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4733 
4734 		/* Pull available data from fifo. Use DMAE since this is
4735 		 * widebus memory and must be accessed atomically. Test for
4736 		 * dwords_read not passing buffer size since more entries could
4737 		 * be added to the buffer as we
4738 		 * are emptying it.
4739 		 */
4740 		for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4741 			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO), REG_FIFO_ELEMENT_DWORDS, true);
4742 			fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4743 		}
4744 
4745 		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4746 	}
4747 	else {
4748 
4749 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4750 		 * test how much data is available, except for reading it.
4751 		 */
4752 		offset += REG_FIFO_DEPTH_DWORDS;
4753 	}
4754 
4755 	/* Dump last section */
4756 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4757 
4758 	*num_dumped_dwords = offset;
4759 
4760 	return DBG_STATUS_OK;
4761 }
4762 
4763 /* Dump IGU FIFO */
4764 static enum dbg_status ecore_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
4765 										   struct ecore_ptt *p_ptt,
4766 										   u32 *dump_buf,
4767 										   bool dump,
4768 										   u32 *num_dumped_dwords)
4769 {
4770 	u32 dwords_read, size_param_offset, offset = 0;
4771 	bool fifo_has_data;
4772 
4773 	*num_dumped_dwords = 0;
4774 
4775 	/* Dump global params */
4776 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4777 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo");
4778 
4779 	/* Dump fifo data section header and param. The size param is 0 for
4780 	 * now, and is overwritten after reading the FIFO.
4781 	 */
4782 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1);
4783 	size_param_offset = offset;
4784 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4785 
4786 	if (dump) {
4787 		fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4788 
4789 		/* Pull available data from fifo. Use DMAE since this is
4790 		 * widebus memory and must be accessed atomically. Test for
4791 		 * dwords_read not passing buffer size since more entries could
4792 		 * be added to the buffer as we are emptying it.
4793 		 */
4794 		for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4795 			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY), IGU_FIFO_ELEMENT_DWORDS, true);
4796 			fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4797 		}
4798 
4799 		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4800 	}
4801 	else {
4802 
4803 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4804 		 * test how much data is available, except for reading it.
4805 		 */
4806 		offset += IGU_FIFO_DEPTH_DWORDS;
4807 	}
4808 
4809 	/* Dump last section */
4810 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4811 
4812 	*num_dumped_dwords = offset;
4813 
4814 	return DBG_STATUS_OK;
4815 }
4816 
4817 /* Protection Override dump */
4818 static enum dbg_status ecore_protection_override_dump(struct ecore_hwfn *p_hwfn,
4819 													  struct ecore_ptt *p_ptt,
4820 													  u32 *dump_buf,
4821 													  bool dump,
4822 													  u32 *num_dumped_dwords)
4823 {
4824 	u32 size_param_offset, override_window_dwords, offset = 0;
4825 
4826 	*num_dumped_dwords = 0;
4827 
4828 	/* Dump global params */
4829 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4830 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override");
4831 
4832 	/* Dump data section header and param. The size param is 0 for now,
4833 	 * and is overwritten after reading the data.
4834 	 */
4835 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1);
4836 	size_param_offset = offset;
4837 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4838 
4839 	if (dump) {
4840 		/* Add override window info to buffer */
4841 		override_window_dwords = ecore_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4842 		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW), override_window_dwords, true);
4843 		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords);
4844 	}
4845 	else {
4846 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4847 	}
4848 
4849 	/* Dump last section */
4850 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4851 
4852 	*num_dumped_dwords = offset;
4853 
4854 	return DBG_STATUS_OK;
4855 }
4856 
4857 /* Performs FW Asserts Dump to the specified buffer.
4858  * Returns the dumped size in dwords.
4859  */
4860 static u32 ecore_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
4861 								 struct ecore_ptt *p_ptt,
4862 								 u32 *dump_buf,
4863 								 bool dump)
4864 {
4865 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4866 	struct fw_asserts_ram_section *asserts;
4867 	char storm_letter_str[2] = "?";
4868 	struct fw_info fw_info;
4869 	u32 offset = 0;
4870 	u8 storm_id;
4871 
4872 	/* Dump global params */
4873 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4874 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts");
4875 
4876 	/* Find Storm dump size */
4877 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4878 		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, last_list_idx, addr;
4879 		struct storm_defs *storm = &s_storm_defs[storm_id];
4880 
4881 		if (dev_data->block_in_reset[storm->block_id])
4882 			continue;
4883 
4884 		/* Read FW info for the current Storm  */
4885 		ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4886 
4887 		asserts = &fw_info.fw_asserts_section;
4888 
4889 		/* Dump FW Asserts section header and params */
4890 		storm_letter_str[0] = storm->letter;
4891 		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "fw_asserts", 2);
4892 		offset += ecore_dump_str_param(dump_buf + offset, dump, "storm", storm_letter_str);
4893 		offset += ecore_dump_num_param(dump_buf + offset, dump, "size", asserts->list_element_dword_size);
4894 
4895 		/* Read and dump FW Asserts data */
4896 		if (!dump) {
4897 			offset += asserts->list_element_dword_size;
4898 			continue;
4899 		}
4900 
4901 		fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
4902 			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4903 		next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4904 		next_list_idx = ecore_rd(p_hwfn, p_ptt, next_list_idx_addr);
4905 		last_list_idx = (next_list_idx > 0 ? next_list_idx : asserts->list_num_elements) - 1;
4906 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset +
4907 					last_list_idx * asserts->list_element_dword_size;
4908 		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, asserts->list_element_dword_size, false);
4909 	}
4910 
4911 	/* Dump last section */
4912 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4913 
4914 	return offset;
4915 }
4916 
4917 /***************************** Public Functions *******************************/
4918 
4919 enum dbg_status ecore_dbg_set_bin_ptr(const u8 * const bin_ptr)
4920 {
4921 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
4922 	u8 buf_id;
4923 
4924 	/* convert binary data to debug arrays */
4925 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4926 		s_dbg_arrays[buf_id].ptr = (u32 *)(bin_ptr + buf_array[buf_id].offset);
4927 		s_dbg_arrays[buf_id].size_in_dwords = BYTES_TO_DWORDS(buf_array[buf_id].length);
4928 	}
4929 
4930 	return DBG_STATUS_OK;
4931 }
4932 
4933 enum dbg_status ecore_dbg_set_app_ver(u32 ver)
4934 {
4935 	if (ver < TOOLS_VERSION)
4936 		return DBG_STATUS_UNSUPPORTED_APP_VERSION;
4937 
4938 	s_app_ver = ver;
4939 
4940 	return DBG_STATUS_OK;
4941 }
4942 
4943 u32 ecore_dbg_get_fw_func_ver(void)
4944 {
4945 	return TOOLS_VERSION;
4946 }
4947 
4948 enum chip_ids ecore_dbg_get_chip_id(struct ecore_hwfn *p_hwfn)
4949 {
4950 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4951 
4952 	return (enum chip_ids)dev_data->chip_id;
4953 }
4954 
4955 enum dbg_status ecore_dbg_bus_reset(struct ecore_hwfn *p_hwfn,
4956 									struct ecore_ptt *p_ptt,
4957 									bool one_shot_en,
4958 									u8 force_hw_dwords,
4959 									bool unify_inputs,
4960 									bool grc_input_en)
4961 {
4962 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4963 	enum dbg_status status;
4964 
4965 	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
4966 	if (status != DBG_STATUS_OK)
4967 		return status;
4968 
4969 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_reset: one_shot_en = %d, force_hw_dwords = %d, unify_inputs = %d, grc_input_en = %d\n", one_shot_en, force_hw_dwords, unify_inputs, grc_input_en);
4970 
4971 	if (force_hw_dwords &&
4972 		force_hw_dwords != 4 &&
4973 		force_hw_dwords != 8)
4974 		return DBG_STATUS_INVALID_ARGS;
4975 
4976 	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
4977 		return DBG_STATUS_DBG_BUS_IN_USE;
4978 
4979 	/* Update reset state of all blocks */
4980 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4981 
4982 	/* Disable all debug inputs */
4983 	status = ecore_bus_disable_inputs(p_hwfn, p_ptt, false);
4984 	if (status != DBG_STATUS_OK)
4985 		return status;
4986 
4987 	/* Reset DBG block */
4988 	ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
4989 
4990 	/* Set one-shot / wrap-around */
4991 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, one_shot_en ? 0 : 1);
4992 
4993 	/* Init state params */
4994 	OSAL_MEMSET(&dev_data->bus, 0, sizeof(dev_data->bus));
4995 	dev_data->bus.target = DBG_BUS_TARGET_ID_INT_BUF;
4996 	dev_data->bus.state = DBG_BUS_STATE_READY;
4997 	dev_data->bus.one_shot_en = one_shot_en;
4998 	dev_data->bus.hw_dwords = force_hw_dwords;
4999 	dev_data->bus.grc_input_en = grc_input_en;
5000 	dev_data->bus.unify_inputs = unify_inputs;
5001 	dev_data->bus.num_enabled_blocks = grc_input_en ? 1 : 0;
5002 
5003 	/* Init special DBG block */
5004 	if (grc_input_en)
5005 		SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
5006 
5007 	return DBG_STATUS_OK;
5008 }
5009 
5010 enum dbg_status ecore_dbg_bus_set_pci_output(struct ecore_hwfn *p_hwfn,
5011 											 struct ecore_ptt *p_ptt,
5012 											 u16 buf_size_kb)
5013 {
5014 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5015 	dma_addr_t pci_buf_phys_addr;
5016 	void *pci_buf;
5017 
5018 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_pci_output: buf_size_kb = %d\n", buf_size_kb);
5019 
5020 	if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
5021 		return DBG_STATUS_OUTPUT_ALREADY_SET;
5022 	if (dev_data->bus.state != DBG_BUS_STATE_READY || dev_data->bus.pci_buf.size > 0)
5023 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5024 
5025 	dev_data->bus.target = DBG_BUS_TARGET_ID_PCI;
5026 	dev_data->bus.pci_buf.size = buf_size_kb * 1024;
5027 	if (dev_data->bus.pci_buf.size % PCI_PKT_SIZE_IN_BYTES)
5028 		return DBG_STATUS_INVALID_ARGS;
5029 
5030 	pci_buf = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &pci_buf_phys_addr, dev_data->bus.pci_buf.size);
5031 	if (!pci_buf)
5032 		return DBG_STATUS_PCI_BUF_ALLOC_FAILED;
5033 
5034 	OSAL_MEMCPY(&dev_data->bus.pci_buf.phys_addr, &pci_buf_phys_addr, sizeof(pci_buf_phys_addr));
5035 
5036 	dev_data->bus.pci_buf.virt_addr.lo = (u32)((u64)(osal_uintptr_t)pci_buf);
5037 	dev_data->bus.pci_buf.virt_addr.hi = (u32)((u64)(osal_uintptr_t)pci_buf >> 32);
5038 
5039 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB, dev_data->bus.pci_buf.phys_addr.lo);
5040 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB, dev_data->bus.pci_buf.phys_addr.hi);
5041 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, PCI_PKT_SIZE_IN_CHUNKS);
5042 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_SIZE, dev_data->bus.pci_buf.size / PCI_PKT_SIZE_IN_BYTES);
5043 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_FUNC_NUM, OPAQUE_FID(p_hwfn->rel_pf_id));
5044 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_LOGIC_ADDR, PCI_PHYS_ADDR_TYPE);
5045 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_REQ_CREDIT, PCI_REQ_CREDIT);
5046 	ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_PCI);
5047 	ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_PCI);
5048 
5049 	return DBG_STATUS_OK;
5050 }
5051 
5052 enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
5053 											struct ecore_ptt *p_ptt,
5054 											u8 port_id,
5055 											u32 dest_addr_lo32,
5056 											u16 dest_addr_hi16,
5057 											u16 data_limit_size_kb,
5058 											bool send_to_other_engine,
5059 											bool rcv_from_other_engine)
5060 {
5061 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5062 
5063 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_nw_output: port_id = %d, dest_addr_lo32 = 0x%x, dest_addr_hi16 = 0x%x, data_limit_size_kb = %d, send_to_other_engine = %d, rcv_from_other_engine = %d\n", port_id, dest_addr_lo32, dest_addr_hi16, data_limit_size_kb, send_to_other_engine, rcv_from_other_engine);
5064 
5065 	if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
5066 		return DBG_STATUS_OUTPUT_ALREADY_SET;
5067 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5068 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5069 	if (port_id >= s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id].num_ports || (send_to_other_engine && rcv_from_other_engine))
5070 		return DBG_STATUS_INVALID_ARGS;
5071 
5072 	dev_data->bus.target = DBG_BUS_TARGET_ID_NIG;
5073 	dev_data->bus.rcv_from_other_engine = rcv_from_other_engine;
5074 
5075 	ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_NIG);
5076 	ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_NIG);
5077 
5078 	if (send_to_other_engine)
5079 		ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX);
5080 	else
5081 		ecore_wr(p_hwfn, p_ptt, NIG_REG_DEBUG_PORT, port_id);
5082 
5083 	if (rcv_from_other_engine) {
5084 		ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX);
5085 	}
5086 	else {
5087 
5088 		/* Configure ethernet header of 14 bytes */
5089 		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_WIDTH, 0);
5090 		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_7, dest_addr_lo32);
5091 		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_6, (u32)SRC_MAC_ADDR_LO16 | ((u32)dest_addr_hi16 << 16));
5092 		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_5, SRC_MAC_ADDR_HI32);
5093 		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_4, (u32)ETH_TYPE << 16);
5094 		ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, NIG_PKT_SIZE_IN_CHUNKS);
5095 		if (data_limit_size_kb)
5096 			ecore_wr(p_hwfn, p_ptt, DBG_REG_NIG_DATA_LIMIT_SIZE, (data_limit_size_kb * 1024) / CHUNK_SIZE_IN_BYTES);
5097 	}
5098 
5099 	return DBG_STATUS_OK;
5100 }
5101 
5102 static bool ecore_is_overlapping_enable_mask(struct ecore_hwfn *p_hwfn,
5103 									  u8 enable_mask,
5104 									  u8 right_shift)
5105 {
5106 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5107 	u8 curr_shifted_enable_mask, shifted_enable_mask;
5108 	u32 block_id;
5109 
5110 	shifted_enable_mask = SHR(enable_mask, VALUES_PER_CYCLE, right_shift);
5111 
5112 	if (dev_data->bus.num_enabled_blocks) {
5113 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5114 			struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
5115 
5116 			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5117 				continue;
5118 
5119 			curr_shifted_enable_mask =
5120 				SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5121 					VALUES_PER_CYCLE,
5122 					GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5123 			if (shifted_enable_mask & curr_shifted_enable_mask)
5124 				return true;
5125 		}
5126 	}
5127 
5128 	return false;
5129 }
5130 
5131 enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
5132 										   enum block_id block_id,
5133 										   u8 line_num,
5134 										   u8 enable_mask,
5135 										   u8 right_shift,
5136 										   u8 force_valid_mask,
5137 										   u8 force_frame_mask)
5138 {
5139 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5140 	struct block_defs *block = s_block_defs[block_id];
5141 	struct dbg_bus_block_data *block_bus;
5142 	struct dbg_bus_block *block_desc;
5143 
5144 	block_bus = &dev_data->bus.blocks[block_id];
5145 	block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
5146 
5147 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_block: block = %d, line_num = %d, enable_mask = 0x%x, right_shift = %d, force_valid_mask = 0x%x, force_frame_mask = 0x%x\n", block_id, line_num, enable_mask, right_shift, force_valid_mask, force_frame_mask);
5148 
5149 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5150 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5151 	if (block_id >= MAX_BLOCK_ID)
5152 		return DBG_STATUS_INVALID_ARGS;
5153 	if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5154 		return DBG_STATUS_BLOCK_ALREADY_ENABLED;
5155 	if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS ||
5156 		line_num >= NUM_DBG_LINES(block_desc) ||
5157 		!enable_mask ||
5158 		enable_mask > MAX_CYCLE_VALUES_MASK ||
5159 		force_valid_mask > MAX_CYCLE_VALUES_MASK ||
5160 		force_frame_mask > MAX_CYCLE_VALUES_MASK ||
5161 		right_shift > VALUES_PER_CYCLE - 1)
5162 		return DBG_STATUS_INVALID_ARGS;
5163 	if (dev_data->block_in_reset[block_id])
5164 		return DBG_STATUS_BLOCK_IN_RESET;
5165 	if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, enable_mask, right_shift))
5166 		return DBG_STATUS_INPUT_OVERLAP;
5167 
5168 	dev_data->bus.blocks[block_id].line_num = line_num;
5169 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, enable_mask);
5170 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT, right_shift);
5171 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK, force_valid_mask);
5172 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK, force_frame_mask);
5173 
5174 	dev_data->bus.num_enabled_blocks++;
5175 
5176 	return DBG_STATUS_OK;
5177 }
5178 
5179 enum dbg_status ecore_dbg_bus_enable_storm(struct ecore_hwfn *p_hwfn,
5180 										   enum dbg_storms storm_id,
5181 										   enum dbg_bus_storm_modes storm_mode)
5182 {
5183 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5184 	struct dbg_bus_data *bus = &dev_data->bus;
5185 	struct dbg_bus_storm_data *storm_bus;
5186 	struct storm_defs *storm;
5187 
5188 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_storm: storm = %d, storm_mode = %d\n", storm_id, storm_mode);
5189 
5190 	if (bus->state != DBG_BUS_STATE_READY)
5191 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5192 	if (bus->hw_dwords >= 4)
5193 		return DBG_STATUS_HW_ONLY_RECORDING;
5194 	if (storm_id >= MAX_DBG_STORMS)
5195 		return DBG_STATUS_INVALID_ARGS;
5196 	if (storm_mode >= MAX_DBG_BUS_STORM_MODES)
5197 		return DBG_STATUS_INVALID_ARGS;
5198 	if (bus->unify_inputs)
5199 		return DBG_STATUS_INVALID_ARGS;
5200 	if (bus->storms[storm_id].enabled)
5201 		return DBG_STATUS_STORM_ALREADY_ENABLED;
5202 
5203 	storm = &s_storm_defs[storm_id];
5204 	storm_bus = &bus->storms[storm_id];
5205 
5206 	if (dev_data->block_in_reset[storm->block_id])
5207 		return DBG_STATUS_BLOCK_IN_RESET;
5208 
5209 	storm_bus->enabled = true;
5210 	storm_bus->mode = (u8)storm_mode;
5211 	storm_bus->hw_id = bus->num_enabled_storms;
5212 
5213 	bus->num_enabled_storms++;
5214 
5215 	return DBG_STATUS_OK;
5216 }
5217 
5218 enum dbg_status ecore_dbg_bus_enable_timestamp(struct ecore_hwfn *p_hwfn,
5219 											   struct ecore_ptt *p_ptt,
5220 											   u8 valid_mask,
5221 											   u8 frame_mask,
5222 											   u32 tick_len)
5223 {
5224 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5225 
5226 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_timestamp: valid_mask = 0x%x, frame_mask = 0x%x, tick_len = %d\n", valid_mask, frame_mask, tick_len);
5227 
5228 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5229 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5230 	if (valid_mask > 0x7 || frame_mask > 0x7)
5231 		return DBG_STATUS_INVALID_ARGS;
5232 	if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, 0x1, 0))
5233 		return DBG_STATUS_INPUT_OVERLAP;
5234 
5235 	dev_data->bus.timestamp_input_en = true;
5236 	dev_data->bus.num_enabled_blocks++;
5237 
5238 	SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
5239 
5240 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, valid_mask);
5241 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_FRAME_EN, frame_mask);
5242 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_TICK, tick_len);
5243 
5244 	return DBG_STATUS_OK;
5245 }
5246 
5247 enum dbg_status ecore_dbg_bus_add_eid_range_sem_filter(struct ecore_hwfn *p_hwfn,
5248 													   enum dbg_storms storm_id,
5249 													   u8 min_eid,
5250 													   u8 max_eid)
5251 {
5252 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5253 	struct dbg_bus_storm_data *storm_bus;
5254 
5255 	storm_bus = &dev_data->bus.storms[storm_id];
5256 
5257 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_range_sem_filter: storm = %d, min_eid = 0x%x, max_eid = 0x%x\n", storm_id, min_eid, max_eid);
5258 
5259 	if (storm_id >= MAX_DBG_STORMS)
5260 		return DBG_STATUS_INVALID_ARGS;
5261 	if (min_eid > max_eid)
5262 		return DBG_STATUS_INVALID_ARGS;
5263 	if (!storm_bus->enabled)
5264 		return DBG_STATUS_STORM_NOT_ENABLED;
5265 
5266 	storm_bus->eid_filter_en = 1;
5267 	storm_bus->eid_range_not_mask = 1;
5268 	storm_bus->eid_filter_params.range.min = min_eid;
5269 	storm_bus->eid_filter_params.range.max = max_eid;
5270 
5271 	return DBG_STATUS_OK;
5272 }
5273 
5274 enum dbg_status ecore_dbg_bus_add_eid_mask_sem_filter(struct ecore_hwfn *p_hwfn,
5275 													  enum dbg_storms storm_id,
5276 													  u8 eid_val,
5277 													  u8 eid_mask)
5278 {
5279 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5280 	struct dbg_bus_storm_data *storm_bus;
5281 
5282 	storm_bus = &dev_data->bus.storms[storm_id];
5283 
5284 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_mask_sem_filter: storm = %d, eid_val = 0x%x, eid_mask = 0x%x\n", storm_id, eid_val, eid_mask);
5285 
5286 	if (storm_id >= MAX_DBG_STORMS)
5287 		return DBG_STATUS_INVALID_ARGS;
5288 	if (!storm_bus->enabled)
5289 		return DBG_STATUS_STORM_NOT_ENABLED;
5290 
5291 	storm_bus->eid_filter_en = 1;
5292 	storm_bus->eid_range_not_mask = 0;
5293 	storm_bus->eid_filter_params.mask.val = eid_val;
5294 	storm_bus->eid_filter_params.mask.mask = eid_mask;
5295 
5296 	return DBG_STATUS_OK;
5297 }
5298 
5299 enum dbg_status ecore_dbg_bus_add_cid_sem_filter(struct ecore_hwfn *p_hwfn,
5300 												 enum dbg_storms storm_id,
5301 												 u32 cid)
5302 {
5303 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5304 	struct dbg_bus_storm_data *storm_bus;
5305 
5306 	storm_bus = &dev_data->bus.storms[storm_id];
5307 
5308 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_cid_sem_filter: storm = %d, cid = 0x%x\n", storm_id, cid);
5309 
5310 	if (storm_id >= MAX_DBG_STORMS)
5311 		return DBG_STATUS_INVALID_ARGS;
5312 	if (!storm_bus->enabled)
5313 		return DBG_STATUS_STORM_NOT_ENABLED;
5314 
5315 	storm_bus->cid_filter_en = 1;
5316 	storm_bus->cid = cid;
5317 
5318 	return DBG_STATUS_OK;
5319 }
5320 
5321 enum dbg_status ecore_dbg_bus_enable_filter(struct ecore_hwfn *p_hwfn,
5322 											struct ecore_ptt *p_ptt,
5323 											enum block_id block_id,
5324 											u8 const_msg_len)
5325 {
5326 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5327 
5328 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_filter: block = %d, const_msg_len = %d\n", block_id, const_msg_len);
5329 
5330 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5331 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5332 	if (dev_data->bus.filter_en)
5333 		return DBG_STATUS_FILTER_ALREADY_ENABLED;
5334 	if (block_id >= MAX_BLOCK_ID)
5335 		return DBG_STATUS_INVALID_ARGS;
5336 	if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5337 		return DBG_STATUS_BLOCK_NOT_ENABLED;
5338 	if (!dev_data->bus.unify_inputs)
5339 		return DBG_STATUS_FILTER_BUG;
5340 
5341 	dev_data->bus.filter_en = true;
5342 	dev_data->bus.next_constraint_id = 0;
5343 	dev_data->bus.adding_filter = true;
5344 
5345 	/* HW ID is set to 0 due to required unifyInputs */
5346 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ID_NUM, 0);
5347 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH_ENABLE, const_msg_len > 0 ? 1 : 0);
5348 	if (const_msg_len > 0)
5349 		ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH, const_msg_len - 1);
5350 
5351 	return DBG_STATUS_OK;
5352 }
5353 
5354 enum dbg_status ecore_dbg_bus_enable_trigger(struct ecore_hwfn *p_hwfn,
5355 											 struct ecore_ptt *p_ptt,
5356 											 bool rec_pre_trigger,
5357 											 u8 pre_chunks,
5358 											 bool rec_post_trigger,
5359 											 u32 post_cycles,
5360 											 bool filter_pre_trigger,
5361 											 bool filter_post_trigger)
5362 {
5363 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5364 	enum dbg_bus_post_trigger_types post_trigger_type;
5365 	enum dbg_bus_pre_trigger_types pre_trigger_type;
5366 	struct dbg_bus_data *bus = &dev_data->bus;
5367 
5368 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_trigger: rec_pre_trigger = %d, pre_chunks = %d, rec_post_trigger = %d, post_cycles = %d, filter_pre_trigger = %d, filter_post_trigger = %d\n", rec_pre_trigger, pre_chunks, rec_post_trigger, post_cycles, filter_pre_trigger, filter_post_trigger);
5369 
5370 	if (bus->state != DBG_BUS_STATE_READY)
5371 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5372 	if (bus->trigger_en)
5373 		return DBG_STATUS_TRIGGER_ALREADY_ENABLED;
5374 	if (rec_pre_trigger && pre_chunks >= INT_BUF_SIZE_IN_CHUNKS)
5375 		return DBG_STATUS_INVALID_ARGS;
5376 
5377 	bus->trigger_en = true;
5378 	bus->filter_pre_trigger = filter_pre_trigger;
5379 	bus->filter_post_trigger = filter_post_trigger;
5380 
5381 	if (rec_pre_trigger) {
5382 		pre_trigger_type = pre_chunks ? DBG_BUS_PRE_TRIGGER_NUM_CHUNKS : DBG_BUS_PRE_TRIGGER_START_FROM_ZERO;
5383 		ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS, pre_chunks);
5384 	}
5385 	else {
5386 		pre_trigger_type = DBG_BUS_PRE_TRIGGER_DROP;
5387 	}
5388 
5389 	if (rec_post_trigger) {
5390 		post_trigger_type = DBG_BUS_POST_TRIGGER_RECORD;
5391 		ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES, post_cycles ? post_cycles : 0xffffffff);
5392 	}
5393 	else {
5394 		post_trigger_type = DBG_BUS_POST_TRIGGER_DROP;
5395 	}
5396 
5397 	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE, pre_trigger_type);
5398 	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE, post_trigger_type);
5399 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 1);
5400 
5401 	return DBG_STATUS_OK;
5402 }
5403 
5404 enum dbg_status ecore_dbg_bus_add_trigger_state(struct ecore_hwfn *p_hwfn,
5405 												struct ecore_ptt *p_ptt,
5406 												enum block_id block_id,
5407 												u8 const_msg_len,
5408 												u16 count_to_next)
5409 {
5410 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5411 	struct dbg_bus_data *bus = &dev_data->bus;
5412 	struct dbg_bus_block_data *block_bus;
5413 	u8 reg_offset;
5414 
5415 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_trigger_state: block = %d, const_msg_len = %d, count_to_next = %d\n", block_id, const_msg_len, count_to_next);
5416 
5417 	block_bus = &bus->blocks[block_id];
5418 
5419 	if (!bus->trigger_en)
5420 		return DBG_STATUS_TRIGGER_NOT_ENABLED;
5421 	if (bus->next_trigger_state == MAX_TRIGGER_STATES)
5422 		return DBG_STATUS_TOO_MANY_TRIGGER_STATES;
5423 	if (block_id >= MAX_BLOCK_ID)
5424 		return DBG_STATUS_INVALID_ARGS;
5425 	if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5426 		return DBG_STATUS_BLOCK_NOT_ENABLED;
5427 	if (!count_to_next)
5428 		return DBG_STATUS_INVALID_ARGS;
5429 
5430 	bus->next_constraint_id = 0;
5431 	bus->adding_filter = false;
5432 
5433 	/* Store block's shifted enable mask */
5434 	SET_FIELD(bus->trigger_states[dev_data->bus.next_trigger_state].data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK, SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5435 					   VALUES_PER_CYCLE,
5436 					   GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT)));
5437 
5438 	/* Set trigger state registers */
5439 	reg_offset = bus->next_trigger_state * BYTES_IN_DWORD;
5440 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 + reg_offset, const_msg_len > 0 ? 1 : 0);
5441 	if (const_msg_len > 0)
5442 		ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 + reg_offset, const_msg_len - 1);
5443 
5444 	/* Set trigger set registers */
5445 	reg_offset = bus->next_trigger_state * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5446 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_COUNT_0 + reg_offset, count_to_next);
5447 
5448 	/* Set next state to final state, and overwrite previous next state
5449 	 * (if any).
5450 	 */
5451 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, MAX_TRIGGER_STATES);
5452 	if (bus->next_trigger_state > 0) {
5453 		reg_offset = (bus->next_trigger_state - 1) * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5454 		ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, bus->next_trigger_state);
5455 	}
5456 
5457 	bus->next_trigger_state++;
5458 
5459 	return DBG_STATUS_OK;
5460 }
5461 
5462 enum dbg_status ecore_dbg_bus_add_constraint(struct ecore_hwfn *p_hwfn,
5463 											 struct ecore_ptt *p_ptt,
5464 											 enum dbg_bus_constraint_ops constraint_op,
5465 											 u32 data_val,
5466 											 u32 data_mask,
5467 											 bool compare_frame,
5468 											 u8 frame_bit,
5469 											 u8 cycle_offset,
5470 											 u8 dword_offset_in_cycle,
5471 											 bool is_mandatory)
5472 {
5473 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5474 	struct dbg_bus_data *bus = &dev_data->bus;
5475 	u16 dword_offset, range = 0;
5476 
5477 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_constraint: op = %d, data_val = 0x%x, data_mask = 0x%x, compare_frame = %d, frame_bit = %d, cycle_offset = %d, dword_offset_in_cycle = %d, is_mandatory = %d\n", constraint_op, data_val, data_mask, compare_frame, frame_bit, cycle_offset, dword_offset_in_cycle, is_mandatory);
5478 
5479 	if (!bus->filter_en && !dev_data->bus.trigger_en)
5480 		return DBG_STATUS_CANT_ADD_CONSTRAINT;
5481 	if (bus->trigger_en && !bus->adding_filter && !bus->next_trigger_state)
5482 		return DBG_STATUS_CANT_ADD_CONSTRAINT;
5483 	if (bus->next_constraint_id >= MAX_CONSTRAINTS)
5484 		return DBG_STATUS_TOO_MANY_CONSTRAINTS;
5485 	if (constraint_op >= MAX_DBG_BUS_CONSTRAINT_OPS || frame_bit > 1 || dword_offset_in_cycle > 3 || (bus->adding_filter && cycle_offset > 3))
5486 		return DBG_STATUS_INVALID_ARGS;
5487 	if (compare_frame &&
5488 		constraint_op != DBG_BUS_CONSTRAINT_OP_EQ &&
5489 		constraint_op != DBG_BUS_CONSTRAINT_OP_NE)
5490 		return DBG_STATUS_INVALID_ARGS;
5491 
5492 	dword_offset = cycle_offset * VALUES_PER_CYCLE + dword_offset_in_cycle;
5493 
5494 	if (!bus->adding_filter) {
5495 		u8 curr_trigger_state_id = bus->next_trigger_state - 1;
5496 		struct dbg_bus_trigger_state_data *trigger_state;
5497 
5498 		trigger_state = &bus->trigger_states[curr_trigger_state_id];
5499 
5500 		/* Check if the selected dword is enabled in the block */
5501 		if (!(GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK) & (u8)(1 << dword_offset_in_cycle)))
5502 			return DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET;
5503 
5504 		/* Add selected dword to trigger state's dword mask */
5505 		SET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK, GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) | (u8)(1 << dword_offset_in_cycle));
5506 	}
5507 
5508 	/* Prepare data mask and range */
5509 	if (constraint_op == DBG_BUS_CONSTRAINT_OP_EQ ||
5510 		constraint_op == DBG_BUS_CONSTRAINT_OP_NE) {
5511 		data_mask = ~data_mask;
5512 	}
5513 	else {
5514 		u8 lsb, width;
5515 
5516 		/* Extract lsb and width from mask */
5517 		if (!data_mask)
5518 			return DBG_STATUS_INVALID_ARGS;
5519 
5520 		for (lsb = 0; lsb < 32 && !(data_mask & 1); lsb++, data_mask >>= 1);
5521 		for (width = 0; width < 32 - lsb && (data_mask & 1); width++, data_mask >>= 1);
5522 		if (data_mask)
5523 			return DBG_STATUS_INVALID_ARGS;
5524 		range = (lsb << 5) | (width - 1);
5525 	}
5526 
5527 	/* Add constraint */
5528 	ecore_bus_set_constraint(p_hwfn, p_ptt, dev_data->bus.adding_filter ? 1 : 0,
5529 		dev_data->bus.next_constraint_id,
5530 		s_constraint_op_defs[constraint_op].hw_op_val,
5531 		data_val, data_mask, frame_bit,
5532 		compare_frame ? 0 : 1, dword_offset, range,
5533 		s_constraint_op_defs[constraint_op].is_cyclic ? 1 : 0,
5534 		is_mandatory ? 1 : 0);
5535 
5536 	/* If first constraint, fill other 3 constraints with dummy constraints
5537 	 * that always match (using the same offset).
5538 	 */
5539 	if (!dev_data->bus.next_constraint_id) {
5540 		u8 i;
5541 
5542 		for (i = 1; i < MAX_CONSTRAINTS; i++)
5543 			ecore_bus_set_constraint(p_hwfn, p_ptt, bus->adding_filter ? 1 : 0,
5544 				i, DBG_BUS_CONSTRAINT_OP_EQ, 0, 0xffffffff,
5545 				0, 1, dword_offset, 0, 0, 1);
5546 	}
5547 
5548 	bus->next_constraint_id++;
5549 
5550 	return DBG_STATUS_OK;
5551 }
5552 
5553 /* Configure the DBG block client mask */
5554 static void ecore_config_dbg_block_client_mask(struct ecore_hwfn *p_hwfn,
5555 										struct ecore_ptt *p_ptt)
5556 {
5557 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5558 	struct dbg_bus_data *bus = &dev_data->bus;
5559 	u32 block_id, client_mask = 0;
5560 	u8 storm_id;
5561 
5562 	/* Update client mask for Storm inputs */
5563 	if (bus->num_enabled_storms)
5564 		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5565 			struct storm_defs *storm = &s_storm_defs[storm_id];
5566 
5567 			if (bus->storms[storm_id].enabled)
5568 				client_mask |= (1 << storm->dbg_client_id[dev_data->chip_id]);
5569 		}
5570 
5571 	/* Update client mask for block inputs */
5572 	if (bus->num_enabled_blocks) {
5573 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5574 			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5575 			struct block_defs *block = s_block_defs[block_id];
5576 
5577 			if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) && block_id != BLOCK_DBG)
5578 				client_mask |= (1 << block->dbg_client_id[dev_data->chip_id]);
5579 		}
5580 	}
5581 
5582 	/* Update client mask for GRC input */
5583 	if (bus->grc_input_en)
5584 		client_mask |= (1 << DBG_BUS_CLIENT_CPU);
5585 
5586 	/* Update client mask for timestamp input */
5587 	if (bus->timestamp_input_en)
5588 		client_mask |= (1 << DBG_BUS_CLIENT_TIMESTAMP);
5589 
5590 	ecore_bus_enable_clients(p_hwfn, p_ptt, client_mask);
5591 }
5592 
5593 /* Configure the DBG block framing mode */
5594 static enum dbg_status ecore_config_dbg_block_framing_mode(struct ecore_hwfn *p_hwfn,
5595 													struct ecore_ptt *p_ptt)
5596 {
5597 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5598 	struct dbg_bus_data *bus = &dev_data->bus;
5599 	enum dbg_bus_frame_modes dbg_framing_mode;
5600 	u32 block_id;
5601 
5602 	if (!bus->hw_dwords && bus->num_enabled_blocks) {
5603 		struct dbg_bus_line *line_desc;
5604 		u8 hw_dwords;
5605 
5606 		/* Choose either 4 HW dwords (128-bit mode) or 8 HW dwords
5607 		 * (256-bit mode).
5608 		 */
5609 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5610 			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5611 
5612 			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5613 				continue;
5614 
5615 			line_desc = get_dbg_bus_line_desc(p_hwfn, (enum block_id)block_id);
5616 			hw_dwords = line_desc && GET_FIELD(line_desc->data, DBG_BUS_LINE_IS_256B) ? 8 : 4;
5617 
5618 			if (bus->hw_dwords > 0 && bus->hw_dwords != hw_dwords)
5619 				return DBG_STATUS_NON_MATCHING_LINES;
5620 
5621 			/* The DBG block doesn't support triggers and
5622 			 * filters on 256b debug lines.
5623 			 */
5624 			if (hw_dwords == 8 && (bus->trigger_en || bus->filter_en))
5625 				return DBG_STATUS_NO_FILTER_TRIGGER_64B;
5626 
5627 			bus->hw_dwords = hw_dwords;
5628 		}
5629 	}
5630 
5631 	switch (bus->hw_dwords) {
5632 	case 0: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5633 	case 4: dbg_framing_mode = DBG_BUS_FRAME_MODE_4HW_0ST; break;
5634 	case 8: dbg_framing_mode = DBG_BUS_FRAME_MODE_8HW_0ST; break;
5635 	default: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5636 	}
5637 	ecore_bus_set_framing_mode(p_hwfn, p_ptt, dbg_framing_mode);
5638 
5639 	return DBG_STATUS_OK;
5640 }
5641 
5642 /* Configure the DBG block Storm data */
5643 static enum dbg_status ecore_config_storm_inputs(struct ecore_hwfn *p_hwfn,
5644 										  struct ecore_ptt *p_ptt)
5645 {
5646 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5647 	struct dbg_bus_data *bus = &dev_data->bus;
5648 	u8 storm_id, i, next_storm_id = 0;
5649 	u32 storm_id_mask = 0;
5650 
5651 	/* Check if SEMI sync FIFO is empty */
5652 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5653 		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5654 		struct storm_defs *storm = &s_storm_defs[storm_id];
5655 
5656 		if (storm_bus->enabled && !ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr))
5657 			return DBG_STATUS_SEMI_FIFO_NOT_EMPTY;
5658 	}
5659 
5660 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5661 		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5662 
5663 		if (storm_bus->enabled)
5664 			storm_id_mask |= (storm_bus->hw_id << (storm_id * HW_ID_BITS));
5665 	}
5666 
5667 	ecore_wr(p_hwfn, p_ptt, DBG_REG_STORM_ID_NUM, storm_id_mask);
5668 
5669 	/* Disable storm stall if recording to internal buffer in one-shot */
5670 	ecore_wr(p_hwfn, p_ptt, DBG_REG_NO_GRANT_ON_FULL, (dev_data->bus.target == DBG_BUS_TARGET_ID_INT_BUF && bus->one_shot_en) ? 0 : 1);
5671 
5672 	/* Configure calendar */
5673 	for (i = 0; i < NUM_CALENDAR_SLOTS; i++, next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS) {
5674 
5675 		/* Find next enabled Storm */
5676 		for (; !dev_data->bus.storms[next_storm_id].enabled; next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS);
5677 
5678 		/* Configure calendar slot */
5679 		ecore_wr(p_hwfn, p_ptt, DBG_REG_CALENDAR_SLOT0 + DWORDS_TO_BYTES(i), next_storm_id);
5680 	}
5681 
5682 	return DBG_STATUS_OK;
5683 }
5684 
5685 /* Assign HW ID to each dword/qword:
5686  * if the inputs are unified, HW ID 0 is assigned to all dwords/qwords.
5687  * Otherwise, we would like to assign a different HW ID to each dword, to avoid
5688  * data synchronization issues. however, we need to check if there is a trigger
5689  * state for which more than one dword has a constraint. if there is, we cannot
5690  * assign a different HW ID to each dword (since a trigger state has a single
5691  * HW ID), so we assign a different HW ID to each block.
5692  */
5693 static void ecore_assign_hw_ids(struct ecore_hwfn *p_hwfn,
5694 						 u8 hw_ids[VALUES_PER_CYCLE])
5695 {
5696 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5697 	struct dbg_bus_data *bus = &dev_data->bus;
5698 	bool hw_id_per_dword = true;
5699 	u8 val_id, state_id;
5700 	u32 block_id;
5701 
5702 	OSAL_MEMSET(hw_ids, 0, VALUES_PER_CYCLE);
5703 
5704 	if (bus->unify_inputs)
5705 		return;
5706 
5707 	if (bus->trigger_en) {
5708 		for (state_id = 0; state_id < bus->next_trigger_state && hw_id_per_dword; state_id++) {
5709 			u8 num_dwords = 0;
5710 
5711 			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5712 				if (GET_FIELD(bus->trigger_states[state_id].data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id))
5713 					num_dwords++;
5714 
5715 			if (num_dwords > 1)
5716 				hw_id_per_dword = false;
5717 		}
5718 	}
5719 
5720 	if (hw_id_per_dword) {
5721 
5722 		/* Assign a different HW ID for each dword */
5723 		for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5724 			hw_ids[val_id] = val_id;
5725 	}
5726 	else {
5727 		u8 shifted_enable_mask, next_hw_id = 0;
5728 
5729 		/* Assign HW IDs according to blocks enable /  */
5730 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5731 			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5732 
5733 			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5734 				continue;
5735 
5736 			block_bus->hw_id = next_hw_id++;
5737 			if (!block_bus->hw_id)
5738 				continue;
5739 
5740 			shifted_enable_mask =
5741 				SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5742 					VALUES_PER_CYCLE,
5743 					GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5744 
5745 			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5746 				if (shifted_enable_mask & (1 << val_id))
5747 					hw_ids[val_id] = block_bus->hw_id;
5748 		}
5749 	}
5750 }
5751 
5752 /* Configure the DBG block HW blocks data */
5753 static void ecore_config_block_inputs(struct ecore_hwfn *p_hwfn,
5754 							   struct ecore_ptt *p_ptt)
5755 {
5756 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5757 	struct dbg_bus_data *bus = &dev_data->bus;
5758 	u8 hw_ids[VALUES_PER_CYCLE];
5759 	u8 val_id, state_id;
5760 
5761 	ecore_assign_hw_ids(p_hwfn, hw_ids);
5762 
5763 	/* Assign a HW ID to each trigger state */
5764 	if (dev_data->bus.trigger_en) {
5765 		for (state_id = 0; state_id < bus->next_trigger_state; state_id++) {
5766 			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++) {
5767 				u8 state_data = bus->trigger_states[state_id].data;
5768 
5769 				if (GET_FIELD(state_data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id)) {
5770 					ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_ID_0 + state_id * BYTES_IN_DWORD, hw_ids[val_id]);
5771 					break;
5772 				}
5773 			}
5774 		}
5775 	}
5776 
5777 	/* Configure HW ID mask */
5778 	dev_data->bus.hw_id_mask = 0;
5779 	for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5780 		bus->hw_id_mask |= (hw_ids[val_id] << (val_id * HW_ID_BITS));
5781 	ecore_wr(p_hwfn, p_ptt, DBG_REG_HW_ID_NUM, bus->hw_id_mask);
5782 
5783 	/* Configure additional K2 PCIE registers */
5784 	if (dev_data->chip_id == CHIP_K2 &&
5785 		(GET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) ||
5786 			GET_FIELD(bus->blocks[BLOCK_PHY_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))) {
5787 		ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_REPEAT_THRESHOLD_COUNT_K2_E5, 1);
5788 		ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_FW_TRIGGER_ENABLE_K2_E5, 1);
5789 	}
5790 }
5791 
5792 enum dbg_status ecore_dbg_bus_start(struct ecore_hwfn *p_hwfn,
5793 									struct ecore_ptt *p_ptt)
5794 {
5795 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5796 	struct dbg_bus_data *bus = &dev_data->bus;
5797 	enum dbg_bus_filter_types filter_type;
5798 	enum dbg_status status;
5799 	u32 block_id;
5800 	u8 storm_id;
5801 
5802 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_start\n");
5803 
5804 	if (bus->state != DBG_BUS_STATE_READY)
5805 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5806 
5807 	/* Check if any input was enabled */
5808 	if (!bus->num_enabled_storms &&
5809 		!bus->num_enabled_blocks &&
5810 		!bus->rcv_from_other_engine)
5811 		return DBG_STATUS_NO_INPUT_ENABLED;
5812 
5813 	/* Check if too many input types were enabled (storm+dbgmux) */
5814 	if (bus->num_enabled_storms && bus->num_enabled_blocks)
5815 		return DBG_STATUS_TOO_MANY_INPUTS;
5816 
5817 	/* Configure framing mode */
5818 	if ((status = ecore_config_dbg_block_framing_mode(p_hwfn, p_ptt)) != DBG_STATUS_OK)
5819 		return status;
5820 
5821 	/* Configure DBG block for Storm inputs */
5822 	if (bus->num_enabled_storms)
5823 		if ((status = ecore_config_storm_inputs(p_hwfn, p_ptt)) != DBG_STATUS_OK)
5824 			return status;
5825 
5826 	/* Configure DBG block for block inputs */
5827 	if (bus->num_enabled_blocks)
5828 		ecore_config_block_inputs(p_hwfn, p_ptt);
5829 
5830 	/* Configure filter type */
5831 	if (bus->filter_en) {
5832 		if (bus->trigger_en) {
5833 			if (bus->filter_pre_trigger)
5834 				filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_ON : DBG_BUS_FILTER_TYPE_PRE;
5835 			else
5836 				filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_POST : DBG_BUS_FILTER_TYPE_OFF;
5837 		}
5838 		else {
5839 			filter_type = DBG_BUS_FILTER_TYPE_ON;
5840 		}
5841 	}
5842 	else {
5843 		filter_type = DBG_BUS_FILTER_TYPE_OFF;
5844 	}
5845 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, filter_type);
5846 
5847 	/* Restart timestamp */
5848 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP, 0);
5849 
5850 	/* Enable debug block */
5851 	ecore_bus_enable_dbg_block(p_hwfn, p_ptt, 1);
5852 
5853 	/* Configure enabled blocks - must be done before the DBG block is
5854 	 * enabled.
5855 	 */
5856 	if (dev_data->bus.num_enabled_blocks) {
5857 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5858 			if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) || block_id == BLOCK_DBG)
5859 				continue;
5860 
5861 			ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id,
5862 				dev_data->bus.blocks[block_id].line_num,
5863 				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5864 				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT),
5865 				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK),
5866 				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK));
5867 		}
5868 	}
5869 
5870 	/* Configure client mask */
5871 	ecore_config_dbg_block_client_mask(p_hwfn, p_ptt);
5872 
5873 	/* Configure enabled Storms - must be done after the DBG block is
5874 	 * enabled.
5875 	 */
5876 	if (dev_data->bus.num_enabled_storms)
5877 		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
5878 			if (dev_data->bus.storms[storm_id].enabled)
5879 				ecore_bus_enable_storm(p_hwfn, p_ptt, (enum dbg_storms)storm_id);
5880 
5881 	dev_data->bus.state = DBG_BUS_STATE_RECORDING;
5882 
5883 	return DBG_STATUS_OK;
5884 }
5885 
5886 enum dbg_status ecore_dbg_bus_stop(struct ecore_hwfn *p_hwfn,
5887 								   struct ecore_ptt *p_ptt)
5888 {
5889 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5890 	struct dbg_bus_data *bus = &dev_data->bus;
5891 	enum dbg_status status = DBG_STATUS_OK;
5892 
5893 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_stop\n");
5894 
5895 	if (bus->state != DBG_BUS_STATE_RECORDING)
5896 		return DBG_STATUS_RECORDING_NOT_STARTED;
5897 
5898 	status = ecore_bus_disable_inputs(p_hwfn, p_ptt, true);
5899 	if (status != DBG_STATUS_OK)
5900 		return status;
5901 
5902 	ecore_wr(p_hwfn, p_ptt, DBG_REG_CPU_TIMEOUT, 1);
5903 
5904 	OSAL_MSLEEP(FLUSH_DELAY_MS);
5905 
5906 	ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
5907 
5908 	/* Check if trigger worked */
5909 	if (bus->trigger_en) {
5910 		u32 trigger_state = ecore_rd(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATUS_CUR_STATE);
5911 
5912 		if (trigger_state != MAX_TRIGGER_STATES)
5913 			return DBG_STATUS_DATA_DIDNT_TRIGGER;
5914 	}
5915 
5916 	bus->state = DBG_BUS_STATE_STOPPED;
5917 
5918 	return status;
5919 }
5920 
5921 enum dbg_status ecore_dbg_bus_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
5922 												struct ecore_ptt *p_ptt,
5923 												u32 *buf_size)
5924 {
5925 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5926 	struct dbg_bus_data *bus = &dev_data->bus;
5927 	enum dbg_status status;
5928 
5929 	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
5930 
5931 	*buf_size = 0;
5932 
5933 	if (status != DBG_STATUS_OK)
5934 		return status;
5935 
5936 	/* Add dump header */
5937 	*buf_size = (u32)ecore_bus_dump_hdr(p_hwfn, p_ptt, OSAL_NULL, false);
5938 
5939 	switch (bus->target) {
5940 	case DBG_BUS_TARGET_ID_INT_BUF:
5941 		*buf_size += INT_BUF_SIZE_IN_DWORDS; break;
5942 	case DBG_BUS_TARGET_ID_PCI:
5943 		*buf_size += BYTES_TO_DWORDS(bus->pci_buf.size); break;
5944 	default:
5945 		break;
5946 	}
5947 
5948 	/* Dump last section */
5949 	*buf_size += ecore_dump_last_section(OSAL_NULL, 0, false);
5950 
5951 	return DBG_STATUS_OK;
5952 }
5953 
5954 enum dbg_status ecore_dbg_bus_dump(struct ecore_hwfn *p_hwfn,
5955 								   struct ecore_ptt *p_ptt,
5956 								   u32 *dump_buf,
5957 								   u32 buf_size_in_dwords,
5958 								   u32 *num_dumped_dwords)
5959 {
5960 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5961 	u32 min_buf_size_in_dwords, block_id, offset = 0;
5962 	struct dbg_bus_data *bus = &dev_data->bus;
5963 	enum dbg_status status;
5964 	u8 storm_id;
5965 
5966 	*num_dumped_dwords = 0;
5967 
5968 	status = ecore_dbg_bus_get_dump_buf_size(p_hwfn, p_ptt, &min_buf_size_in_dwords);
5969 	if (status != DBG_STATUS_OK)
5970 		return status;
5971 
5972 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_dump: dump_buf = 0x%p, buf_size_in_dwords = %d\n", dump_buf, buf_size_in_dwords);
5973 
5974 	if (bus->state != DBG_BUS_STATE_RECORDING && bus->state != DBG_BUS_STATE_STOPPED)
5975 		return DBG_STATUS_RECORDING_NOT_STARTED;
5976 
5977 	if (bus->state == DBG_BUS_STATE_RECORDING) {
5978 		enum dbg_status stop_state = ecore_dbg_bus_stop(p_hwfn, p_ptt);
5979 		if (stop_state != DBG_STATUS_OK)
5980 			return stop_state;
5981 	}
5982 
5983 	if (buf_size_in_dwords < min_buf_size_in_dwords)
5984 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5985 
5986 	if (bus->target == DBG_BUS_TARGET_ID_PCI && !bus->pci_buf.size)
5987 		return DBG_STATUS_PCI_BUF_NOT_ALLOCATED;
5988 
5989 	/* Dump header */
5990 	offset += ecore_bus_dump_hdr(p_hwfn, p_ptt, dump_buf + offset, true);
5991 
5992 	/* Dump recorded data */
5993 	if (bus->target != DBG_BUS_TARGET_ID_NIG) {
5994 		u32 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, dump_buf + offset, true);
5995 
5996 		if (!recorded_dwords)
5997 			return DBG_STATUS_NO_DATA_RECORDED;
5998 		if (recorded_dwords % CHUNK_SIZE_IN_DWORDS)
5999 			return DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED;
6000 		offset += recorded_dwords;
6001 	}
6002 
6003 	/* Dump last section */
6004 	offset += ecore_dump_last_section(dump_buf, offset, true);
6005 
6006 	/* If recorded to PCI buffer - free the buffer */
6007 	ecore_bus_free_pci_buf(p_hwfn);
6008 
6009 	/* Clear debug bus parameters */
6010 	bus->state = DBG_BUS_STATE_IDLE;
6011 	bus->num_enabled_blocks = 0;
6012 	bus->num_enabled_storms = 0;
6013 	bus->filter_en = bus->trigger_en = 0;
6014 
6015 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++)
6016 		SET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0);
6017 
6018 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
6019 		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
6020 
6021 		storm_bus->enabled = false;
6022 		storm_bus->eid_filter_en = storm_bus->cid_filter_en = 0;
6023 	}
6024 
6025 	*num_dumped_dwords = offset;
6026 
6027 	return DBG_STATUS_OK;
6028 }
6029 
6030 enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
6031 									 enum dbg_grc_params grc_param,
6032 									 u32 val)
6033 {
6034 	int i;
6035 
6036 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
6037 
6038 	/* Initializes the GRC parameters (if not initialized). Needed in order
6039 	 * to set the default parameter values for the first time.
6040 	 */
6041 	ecore_dbg_grc_init_params(p_hwfn);
6042 
6043 	if (grc_param >= MAX_DBG_GRC_PARAMS)
6044 		return DBG_STATUS_INVALID_ARGS;
6045 	if (val < s_grc_param_defs[grc_param].min ||
6046 		val > s_grc_param_defs[grc_param].max)
6047 		return DBG_STATUS_INVALID_ARGS;
6048 
6049 	if (s_grc_param_defs[grc_param].is_preset) {
6050 
6051 		/* Preset param */
6052 
6053 		/* Disabling a preset is not allowed. Call
6054 		 * dbg_grc_set_params_default instead.
6055 		 */
6056 		if (!val)
6057 			return DBG_STATUS_INVALID_ARGS;
6058 
6059 		/* Update all params with the preset values */
6060 		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
6061 			u32 preset_val;
6062 
6063 			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
6064 				preset_val = s_grc_param_defs[i].exclude_all_preset_val;
6065 			else if (grc_param == DBG_GRC_PARAM_CRASH)
6066 				preset_val = s_grc_param_defs[i].crash_preset_val;
6067 			else
6068 				return DBG_STATUS_INVALID_ARGS;
6069 
6070 			ecore_grc_set_param(p_hwfn, (enum dbg_grc_params)i, preset_val);
6071 		}
6072 	}
6073 	else {
6074 
6075 		/* Regular param - set its value */
6076 		ecore_grc_set_param(p_hwfn, grc_param, val);
6077 	}
6078 
6079 	return DBG_STATUS_OK;
6080 }
6081 
6082 /* Assign default GRC param values */
6083 void ecore_dbg_grc_set_params_default(struct ecore_hwfn *p_hwfn)
6084 {
6085 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6086 	u32 i;
6087 
6088 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
6089 		dev_data->grc.param_val[i] = s_grc_param_defs[i].default_val[dev_data->chip_id];
6090 }
6091 
6092 enum dbg_status ecore_dbg_grc_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6093 												struct ecore_ptt *p_ptt,
6094 												u32 *buf_size)
6095 {
6096 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6097 
6098 	*buf_size = 0;
6099 
6100 	if (status != DBG_STATUS_OK)
6101 		return status;
6102 
6103 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
6104 		!s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6105 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6106 
6107 	return ecore_grc_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6108 }
6109 
6110 enum dbg_status ecore_dbg_grc_dump(struct ecore_hwfn *p_hwfn,
6111 								   struct ecore_ptt *p_ptt,
6112 								   u32 *dump_buf,
6113 								   u32 buf_size_in_dwords,
6114 								   u32 *num_dumped_dwords)
6115 {
6116 	u32 needed_buf_size_in_dwords;
6117 	enum dbg_status status;
6118 
6119 	*num_dumped_dwords = 0;
6120 
6121 	status = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6122 	if (status != DBG_STATUS_OK)
6123 		return status;
6124 
6125 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6126 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6127 
6128 	/* Doesn't do anything, needed for compile time asserts */
6129 	ecore_static_asserts();
6130 
6131 	/* GRC Dump */
6132 	status = ecore_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6133 
6134 	/* Reveret GRC params to their default */
6135 	ecore_dbg_grc_set_params_default(p_hwfn);
6136 
6137 	return status;
6138 }
6139 
6140 enum dbg_status ecore_dbg_idle_chk_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6141 													 struct ecore_ptt *p_ptt,
6142 													 u32 *buf_size)
6143 {
6144 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6145 	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
6146 	enum dbg_status status;
6147 
6148 	*buf_size = 0;
6149 
6150 	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6151 	if (status != DBG_STATUS_OK)
6152 		return status;
6153 
6154 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
6155 		!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
6156 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6157 
6158 	if (!idle_chk->buf_size_set) {
6159 		idle_chk->buf_size = ecore_idle_chk_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6160 		idle_chk->buf_size_set = true;
6161 	}
6162 
6163 	*buf_size = idle_chk->buf_size;
6164 
6165 	return DBG_STATUS_OK;
6166 }
6167 
6168 enum dbg_status ecore_dbg_idle_chk_dump(struct ecore_hwfn *p_hwfn,
6169 										struct ecore_ptt *p_ptt,
6170 										u32 *dump_buf,
6171 										u32 buf_size_in_dwords,
6172 										u32 *num_dumped_dwords)
6173 {
6174 	u32 needed_buf_size_in_dwords;
6175 	enum dbg_status status;
6176 
6177 	*num_dumped_dwords = 0;
6178 
6179 	status = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6180 	if (status != DBG_STATUS_OK)
6181 		return status;
6182 
6183 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6184 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6185 
6186 	/* Update reset state */
6187 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6188 
6189 	/* Idle Check Dump */
6190 	*num_dumped_dwords = ecore_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
6191 
6192 	/* Reveret GRC params to their default */
6193 	ecore_dbg_grc_set_params_default(p_hwfn);
6194 
6195 	return DBG_STATUS_OK;
6196 }
6197 
6198 enum dbg_status ecore_dbg_mcp_trace_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6199 													  struct ecore_ptt *p_ptt,
6200 													  u32 *buf_size)
6201 {
6202 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6203 
6204 	*buf_size = 0;
6205 
6206 	if (status != DBG_STATUS_OK)
6207 		return status;
6208 
6209 	return ecore_mcp_trace_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6210 }
6211 
6212 enum dbg_status ecore_dbg_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
6213 										 struct ecore_ptt *p_ptt,
6214 										 u32 *dump_buf,
6215 										 u32 buf_size_in_dwords,
6216 										 u32 *num_dumped_dwords)
6217 {
6218 	u32 needed_buf_size_in_dwords;
6219 	enum dbg_status status;
6220 
6221 	status = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6222 	if (status != DBG_STATUS_OK && status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6223 		return status;
6224 
6225 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6226 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6227 
6228 	/* Update reset state */
6229 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6230 
6231 	/* Perform dump */
6232 	status = ecore_mcp_trace_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6233 
6234 	/* Reveret GRC params to their default */
6235 	ecore_dbg_grc_set_params_default(p_hwfn);
6236 
6237 	return status;
6238 }
6239 
6240 enum dbg_status ecore_dbg_reg_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6241 													 struct ecore_ptt *p_ptt,
6242 													 u32 *buf_size)
6243 {
6244 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6245 
6246 	*buf_size = 0;
6247 
6248 	if (status != DBG_STATUS_OK)
6249 		return status;
6250 
6251 	return ecore_reg_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6252 }
6253 
6254 enum dbg_status ecore_dbg_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
6255 										struct ecore_ptt *p_ptt,
6256 										u32 *dump_buf,
6257 										u32 buf_size_in_dwords,
6258 										u32 *num_dumped_dwords)
6259 {
6260 	u32 needed_buf_size_in_dwords;
6261 	enum dbg_status status;
6262 
6263 	*num_dumped_dwords = 0;
6264 
6265 	status = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6266 	if (status != DBG_STATUS_OK)
6267 		return status;
6268 
6269 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6270 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6271 
6272 	/* Update reset state */
6273 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6274 
6275 	status = ecore_reg_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6276 
6277 	/* Reveret GRC params to their default */
6278 	ecore_dbg_grc_set_params_default(p_hwfn);
6279 
6280 	return status;
6281 }
6282 
6283 enum dbg_status ecore_dbg_igu_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6284 													 struct ecore_ptt *p_ptt,
6285 													 u32 *buf_size)
6286 {
6287 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6288 
6289 	*buf_size = 0;
6290 
6291 	if (status != DBG_STATUS_OK)
6292 		return status;
6293 
6294 	return ecore_igu_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6295 }
6296 
6297 enum dbg_status ecore_dbg_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
6298 										struct ecore_ptt *p_ptt,
6299 										u32 *dump_buf,
6300 										u32 buf_size_in_dwords,
6301 										u32 *num_dumped_dwords)
6302 {
6303 	u32 needed_buf_size_in_dwords;
6304 	enum dbg_status status;
6305 
6306 	*num_dumped_dwords = 0;
6307 
6308 	status = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6309 	if (status != DBG_STATUS_OK)
6310 		return status;
6311 
6312 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6313 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6314 
6315 	/* Update reset state */
6316 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6317 
6318 	status = ecore_igu_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6319 
6320 	/* Reveret GRC params to their default */
6321 	ecore_dbg_grc_set_params_default(p_hwfn);
6322 
6323 	return status;
6324 }
6325 
6326 enum dbg_status ecore_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6327 																struct ecore_ptt *p_ptt,
6328 																u32 *buf_size)
6329 {
6330 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6331 
6332 	*buf_size = 0;
6333 
6334 	if (status != DBG_STATUS_OK)
6335 		return status;
6336 
6337 	return ecore_protection_override_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6338 }
6339 
6340 enum dbg_status ecore_dbg_protection_override_dump(struct ecore_hwfn *p_hwfn,
6341 												   struct ecore_ptt *p_ptt,
6342 												   u32 *dump_buf,
6343 												   u32 buf_size_in_dwords,
6344 												   u32 *num_dumped_dwords)
6345 {
6346 	u32 needed_buf_size_in_dwords;
6347 	enum dbg_status status;
6348 
6349 	*num_dumped_dwords = 0;
6350 
6351 	status = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6352 	if (status != DBG_STATUS_OK)
6353 		return status;
6354 
6355 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6356 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6357 
6358 	/* Update reset state */
6359 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6360 
6361 	status = ecore_protection_override_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6362 
6363 	/* Reveret GRC params to their default */
6364 	ecore_dbg_grc_set_params_default(p_hwfn);
6365 
6366 	return status;
6367 }
6368 
6369 enum dbg_status ecore_dbg_fw_asserts_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6370 													   struct ecore_ptt *p_ptt,
6371 													   u32 *buf_size)
6372 {
6373 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6374 
6375 	*buf_size = 0;
6376 
6377 	if (status != DBG_STATUS_OK)
6378 		return status;
6379 
6380 	/* Update reset state */
6381 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6382 
6383 	*buf_size = ecore_fw_asserts_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6384 
6385 	return DBG_STATUS_OK;
6386 }
6387 
6388 enum dbg_status ecore_dbg_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
6389 										  struct ecore_ptt *p_ptt,
6390 										  u32 *dump_buf,
6391 										  u32 buf_size_in_dwords,
6392 										  u32 *num_dumped_dwords)
6393 {
6394 	u32 needed_buf_size_in_dwords;
6395 	enum dbg_status status;
6396 
6397 	*num_dumped_dwords = 0;
6398 
6399 	status = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6400 	if (status != DBG_STATUS_OK)
6401 		return status;
6402 
6403 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6404 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6405 
6406 	*num_dumped_dwords = ecore_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
6407 
6408 	/* Reveret GRC params to their default */
6409 	ecore_dbg_grc_set_params_default(p_hwfn);
6410 
6411 	return DBG_STATUS_OK;
6412 }
6413 
6414 enum dbg_status ecore_dbg_read_attn(struct ecore_hwfn *p_hwfn,
6415 									struct ecore_ptt *p_ptt,
6416 									enum block_id block_id,
6417 									enum dbg_attn_type attn_type,
6418 									bool clear_status,
6419 									struct dbg_attn_block_result *results)
6420 {
6421 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6422 	u8 reg_idx, num_attn_regs, num_result_regs = 0;
6423 	const struct dbg_attn_reg *attn_reg_arr;
6424 
6425 	if (status != DBG_STATUS_OK)
6426 		return status;
6427 
6428 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6429 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6430 
6431 	attn_reg_arr = ecore_get_block_attn_regs(block_id, attn_type, &num_attn_regs);
6432 
6433 	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
6434 		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
6435 		struct dbg_attn_reg_result *reg_result;
6436 		u32 sts_addr, sts_val;
6437 		u16 modes_buf_offset;
6438 		bool eval_mode;
6439 
6440 		/* Check mode */
6441 		eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
6442 		modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
6443 		if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
6444 			continue;
6445 
6446 		/* Mode match - read attention status register */
6447 		sts_addr = DWORDS_TO_BYTES(clear_status ? reg_data->sts_clr_address : GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS));
6448 		sts_val = ecore_rd(p_hwfn, p_ptt, sts_addr);
6449 		if (!sts_val)
6450 			continue;
6451 
6452 		/* Non-zero attention status - add to results */
6453 		reg_result = &results->reg_results[num_result_regs];
6454 		SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
6455 		SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN, GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
6456 		reg_result->block_attn_offset = reg_data->block_attn_offset;
6457 		reg_result->sts_val = sts_val;
6458 		reg_result->mask_val = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->mask_address));
6459 		num_result_regs++;
6460 	}
6461 
6462 	results->block_id = (u8)block_id;
6463 	results->names_offset = ecore_get_block_attn_data(block_id, attn_type)->names_offset;
6464 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
6465 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
6466 
6467 	return DBG_STATUS_OK;
6468 }
6469 
6470 enum dbg_status ecore_dbg_print_attn(struct ecore_hwfn *p_hwfn,
6471 									 struct dbg_attn_block_result *results)
6472 {
6473 	enum dbg_attn_type attn_type;
6474 	u8 num_regs, i;
6475 
6476 	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
6477 	attn_type = (enum dbg_attn_type)GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
6478 
6479 	for (i = 0; i < num_regs; i++) {
6480 		struct dbg_attn_reg_result *reg_result;
6481 		const char *attn_type_str;
6482 		u32 sts_addr;
6483 
6484 		reg_result = &results->reg_results[i];
6485 		attn_type_str = (attn_type == ATTN_TYPE_INTERRUPT ? "interrupt" : "parity");
6486 		sts_addr = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS);
6487 		DP_NOTICE(p_hwfn, false, "%s: address 0x%08x, status 0x%08x, mask 0x%08x\n", attn_type_str, sts_addr, reg_result->sts_val, reg_result->mask_val);
6488 	}
6489 
6490 	return DBG_STATUS_OK;
6491 }
6492 
6493 bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
6494 							 struct ecore_ptt *p_ptt,
6495 							 enum block_id block_id)
6496 {
6497 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6498 	struct block_defs *block = s_block_defs[block_id];
6499 	u32 reset_reg;
6500 
6501 	if (!block->has_reset_bit)
6502 		return false;
6503 
6504 	reset_reg = block->reset_reg;
6505 
6506 	return s_reset_regs_defs[reset_reg].exists[dev_data->chip_id] ?
6507 		!(ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[reset_reg].addr) & (1 << block->reset_bit_offset)) :	true;
6508 }
6509 
6510