1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "common_nvswitch.h"
25 #include "intr_nvswitch.h"
26 #include "regkey_nvswitch.h"
27 
28 #include "ls10/ls10.h"
29 #include "ls10/minion_ls10.h"
30 
31 #include "nvswitch/ls10/dev_ctrl_ip.h"
32 #include "nvswitch/ls10/dev_pri_masterstation_ip.h"
33 #include "nvswitch/ls10/dev_pri_hub_sys_ip.h"
34 #include "nvswitch/ls10/dev_pri_hub_sysb_ip.h"
35 #include "nvswitch/ls10/dev_pri_hub_prt_ip.h"
36 
37 #include "nvswitch/ls10/dev_npg_ip.h"
38 #include "nvswitch/ls10/dev_nport_ip.h"
39 #include "nvswitch/ls10/dev_route_ip.h"
40 #include "nvswitch/ls10/dev_ingress_ip.h"
41 #include "nvswitch/ls10/dev_sourcetrack_ip.h"
42 #include "nvswitch/ls10/dev_egress_ip.h"
43 #include "nvswitch/ls10/dev_tstate_ip.h"
44 #include "nvswitch/ls10/dev_multicasttstate_ip.h"
45 #include "nvswitch/ls10/dev_reductiontstate_ip.h"
46 
47 #include "nvswitch/ls10/dev_nvlw_ip.h"
48 #include "nvswitch/ls10/dev_minion_ip.h"
49 #include "nvswitch/ls10/dev_cpr_ip.h"
50 #include "nvswitch/ls10/dev_nvlipt_ip.h"
51 #include "nvswitch/ls10/dev_nvlipt_lnk_ip.h"
52 #include "nvswitch/ls10/dev_nvltlc_ip.h"
53 #include "nvswitch/ls10/dev_nvldl_ip.h"
54 
55 #include "nvswitch/ls10/dev_nxbar_tcp_global_ip.h"
56 #include "nvswitch/ls10/dev_nxbar_tile_ip.h"
57 #include "nvswitch/ls10/dev_nxbar_tileout_ip.h"
58 
59 #include "nvswitch/ls10/dev_ctrl_ip_addendum.h"
60 
61 static void
62 _nvswitch_construct_ecc_error_event_ls10
63 (
64     INFOROM_NVS_ECC_ERROR_EVENT *err_event,
65     NvU32  sxid,
66     NvU32  linkId,
67     NvBool bAddressValid,
68     NvU32  address,
69     NvBool bUncErr,
70     NvU32  errorCount
71 )
72 {
73     err_event->sxid          = sxid;
74     err_event->linkId        = linkId;
75     err_event->bAddressValid = bAddressValid;
76     err_event->address       = address;
77     err_event->bUncErr       = bUncErr;
78     err_event->errorCount    = errorCount;
79 }
80 
81 static void
82 _nvswitch_initialize_minion_interrupts
83 (
84     nvswitch_device *device,
85     NvU32 instance
86 )
87 {
88     NvU32 intrEn, localDiscoveredLinks, globalLink, i;
89     localDiscoveredLinks = 0;
90 
91     // Tree 1 (non-stall) is disabled until there is a need
92     NVSWITCH_MINION_WR32_LS10(device, instance, _MINION, _MINION_INTR_NONSTALL_EN, 0);
93 
94      // Tree 0 (stall) is where we route _all_ MINION interrupts for now
95     intrEn = DRF_DEF(_MINION, _MINION_INTR_STALL_EN, _FATAL,          _ENABLE) |
96              DRF_DEF(_MINION, _MINION_INTR_STALL_EN, _NONFATAL,       _ENABLE) |
97              DRF_DEF(_MINION, _MINION_INTR_STALL_EN, _FALCON_STALL,   _ENABLE) |
98              DRF_DEF(_MINION, _MINION_INTR_STALL_EN, _FALCON_NOSTALL, _DISABLE);
99 
100     for (i = 0; i < NVSWITCH_LINKS_PER_MINION_LS10; ++i)
101     {
102         // get the global link number of the link we are iterating over
103         globalLink = (instance * NVSWITCH_LINKS_PER_MINION_LS10) + i;
104 
105         // the link is valid place bit in link mask
106         if (device->link[globalLink].valid)
107         {
108             localDiscoveredLinks |= NVBIT(i);
109         }
110     }
111 
112     intrEn = FLD_SET_DRF_NUM(_MINION, _MINION_INTR_STALL_EN, _LINK,
113                             localDiscoveredLinks, intrEn);
114 
115    {
116         // Disable interrupts only if explicitly requested to.  Default to enable.
117         if (device->regkeys.minion_intr != NV_SWITCH_REGKEY_MINION_INTERRUPTS_DISABLE)
118         {
119             NVSWITCH_MINION_WR32_LS10(device, instance, _MINION, _MINION_INTR_STALL_EN, intrEn);
120         }
121     }
122 }
123 
//
// Enable the top-level NVLink interrupt trees for LS10: NVLW (common and
// per-link), NVLIPT_COMMON, NVLIPT_LNK, each MINION instance, and CPR.
// Leaf-level routing inside NVLIPT/NVLDL/NVLTLC is left to the MINION
// NVLPROD flow (see comment below).
//
static void
_nvswitch_initialize_nvlipt_interrupts_ls10
(
    nvswitch_device *device
)
{
    NvU32 i;
    NvU32 regval = 0;

    //
    // NVLipt interrupt routing (NVLIPT_COMMON, NVLIPT_LNK, NVLDL, NVLTLC)
    // will be initialized by MINION NVLPROD flow
    //
    // We must enable interrupts at the top levels in NVLW, NVLIPT_COMMON,
    // NVLIPT_LNK and MINION
    //

    // NVLW: tree 0 carries fatal + INTR0 sources
    regval = DRF_NUM(_NVLW_COMMON, _INTR_0_MASK, _FATAL,       0x1) |
             DRF_NUM(_NVLW_COMMON, _INTR_0_MASK, _NONFATAL,    0x0) |
             DRF_NUM(_NVLW_COMMON, _INTR_0_MASK, _CORRECTABLE, 0x0) |
             DRF_NUM(_NVLW_COMMON, _INTR_0_MASK, _INTR0,       0x1) |
             DRF_NUM(_NVLW_COMMON, _INTR_0_MASK, _INTR1,       0x0);
    NVSWITCH_BCAST_WR32_LS10(device, NVLW, _NVLW_COMMON, _INTR_0_MASK, regval);

    // NVLW: tree 1 carries nonfatal + correctable + INTR1 sources
    regval = DRF_NUM(_NVLW_COMMON, _INTR_1_MASK, _FATAL,       0x0) |
             DRF_NUM(_NVLW_COMMON, _INTR_1_MASK, _NONFATAL,    0x1) |
             DRF_NUM(_NVLW_COMMON, _INTR_1_MASK, _CORRECTABLE, 0x1) |
             DRF_NUM(_NVLW_COMMON, _INTR_1_MASK, _INTR0,       0x0) |
             DRF_NUM(_NVLW_COMMON, _INTR_1_MASK, _INTR1,       0x1);
    NVSWITCH_BCAST_WR32_LS10(device, NVLW, _NVLW_COMMON, _INTR_1_MASK, regval);

    // NVLW: tree 2 is fully masked (unused)
    regval = DRF_NUM(_NVLW_COMMON, _INTR_2_MASK, _FATAL,       0x0) |
             DRF_NUM(_NVLW_COMMON, _INTR_2_MASK, _NONFATAL,    0x0) |
             DRF_NUM(_NVLW_COMMON, _INTR_2_MASK, _CORRECTABLE, 0x0) |
             DRF_NUM(_NVLW_COMMON, _INTR_2_MASK, _INTR0,       0x0) |
             DRF_NUM(_NVLW_COMMON, _INTR_2_MASK, _INTR1,       0x0);
    NVSWITCH_BCAST_WR32_LS10(device, NVLW, _NVLW_COMMON, _INTR_2_MASK, regval);

    // NVLW link: same tree assignment as above, per link index
    for (i = 0; i < NV_NVLW_LINK_INTR_0_MASK__SIZE_1; i++)
    {
        regval = DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _FATAL,       0x1) |
                 DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _NONFATAL,    0x0) |
                 DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _CORRECTABLE, 0x0) |
                 DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR0,       0x1) |
                 DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR1,       0x0);
        NVSWITCH_BCAST_WR32_LS10(device, NVLW, _NVLW_LINK, _INTR_0_MASK(i), regval);

        regval = DRF_NUM(_NVLW_LINK, _INTR_1_MASK, _FATAL,       0x0) |
                 DRF_NUM(_NVLW_LINK, _INTR_1_MASK, _NONFATAL,    0x1) |
                 DRF_NUM(_NVLW_LINK, _INTR_1_MASK, _CORRECTABLE, 0x1) |
                 DRF_NUM(_NVLW_LINK, _INTR_1_MASK, _INTR0,       0x0) |
                 DRF_NUM(_NVLW_LINK, _INTR_1_MASK, _INTR1,       0x1);
        NVSWITCH_BCAST_WR32_LS10(device, NVLW, _NVLW_LINK, _INTR_1_MASK(i), regval);

        regval = DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _FATAL,       0x0) |
                 DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _NONFATAL,    0x0) |
                 DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _CORRECTABLE, 0x0) |
                 DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR0,       0x0) |
                 DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR1,       0x0);
        NVSWITCH_BCAST_WR32_LS10(device, NVLW, _NVLW_LINK, _INTR_2_MASK(i), regval);
    }

    // NVLIPT_COMMON: enable both interrupt outputs
    regval = DRF_NUM(_NVLIPT_COMMON, _INTR_CONTROL_COMMON, _INT0_EN, 0x1) |
             DRF_NUM(_NVLIPT_COMMON, _INTR_CONTROL_COMMON, _INT1_EN, 0x1);

    NVSWITCH_BCAST_WR32_LS10(device, NVLIPT, _NVLIPT_COMMON, _INTR_CONTROL_COMMON, regval);

    // NVLIPT_LNK: enable both interrupt outputs per link
    regval = DRF_NUM(_NVLIPT_LNK, _INTR_CONTROL_LINK, _INT0_EN, 0x1) |
             DRF_NUM(_NVLIPT_LNK, _INTR_CONTROL_LINK, _INT1_EN, 0x1);
    NVSWITCH_BCAST_WR32_LS10(device, NVLIPT_LNK, _NVLIPT_LNK, _INTR_CONTROL_LINK, regval);

    // NVLIPT_LNK_INTR_1: route link-state-request-ready onto INT1
    regval = DRF_NUM(_NVLIPT_LNK, _INTR_INT1_EN, _LINKSTATEREQUESTREADYSET, 0x1);
    NVSWITCH_BCAST_WR32_LS10(device, NVLIPT_LNK, _NVLIPT_LNK, _INTR_INT1_EN, regval);

    // MINION: program each present instance individually
    for (i = 0; i < NUM_MINION_ENGINE_LS10; ++i)
    {
        if (!NVSWITCH_ENG_VALID_LS10(device, MINION, i))
        {
            continue;
        }

        _nvswitch_initialize_minion_interrupts(device,i);
    }

    // CPR: enable error logging, then route CPR errors onto the NVLW trees

    regval = NVSWITCH_ENG_RD32(device, CPR, _BCAST, 0, _CPR_SYS, _ERR_LOG_EN_0);
    regval = FLD_SET_DRF(_CPR_SYS, _ERR_LOG_EN_0, _ENGINE_RESET_ERR, __PROD, regval);
    NVSWITCH_ENG_WR32(device, CPR, _BCAST, 0, _CPR_SYS, _ERR_LOG_EN_0, regval);

    regval = DRF_DEF(_CPR_SYS, _NVLW_INTR_0_MASK, _CPR_INTR, _ENABLE) |
          DRF_DEF(_CPR_SYS, _NVLW_INTR_0_MASK, _INTR0, _ENABLE);
    NVSWITCH_ENG_WR32(device, CPR, _BCAST, 0, _CPR_SYS, _NVLW_INTR_0_MASK, regval);

    regval = DRF_DEF(_CPR_SYS, _NVLW_INTR_1_MASK, _CPR_INTR, _DISABLE) |
          DRF_DEF(_CPR_SYS, _NVLW_INTR_1_MASK, _INTR1, _ENABLE);
    NVSWITCH_ENG_WR32(device, CPR, _BCAST, 0, _CPR_SYS, _NVLW_INTR_1_MASK, regval);

    regval = DRF_DEF(_CPR_SYS, _NVLW_INTR_2_MASK, _CPR_INTR, _DISABLE) |
          DRF_DEF(_CPR_SYS, _NVLW_INTR_2_MASK, _INTR2, _ENABLE);
    NVSWITCH_ENG_WR32(device, CPR, _BCAST, 0, _CPR_SYS, _NVLW_INTR_2_MASK, regval);
}
232 
//
// Build and cache the ROUTE block's fatal/nonfatal error-report enable
// masks in per-chip state.  This function only populates
// chip_device->intr_mask; NOTE(review): presumably the masks are written
// to the _ROUTE _ERR_*_REPORT_EN_0 registers elsewhere — confirm.
//
static void
_nvswitch_initialize_route_interrupts
(
    nvswitch_device *device
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);

    // Fatal: buffer errors, parity errors, and ECC double-bit errors
    chip_device->intr_mask.route.fatal =
        DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _ROUTEBUFERR, _ENABLE)          |
        DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _GLT_ECC_DBE_ERR, _ENABLE)      |
        DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _PDCTRLPARERR, _ENABLE)         |
        DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _NVS_ECC_DBE_ERR, _ENABLE)      |
        DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _CDTPARERR, _ENABLE)            |
        DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _MCRID_ECC_DBE_ERR, _ENABLE)    |
        DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _EXTMCRID_ECC_DBE_ERR, _ENABLE) |
        DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _RAM_ECC_DBE_ERR, _ENABLE);

    // Nonfatal: routing-policy errors and ECC limit (correctable) errors
    chip_device->intr_mask.route.nonfatal =
        DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _NOPORTDEFINEDERR, _ENABLE)         |
        DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _INVALIDROUTEPOLICYERR, _ENABLE)    |
        DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _GLT_ECC_LIMIT_ERR, _ENABLE)        |
        DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _NVS_ECC_LIMIT_ERR, _ENABLE)        |
        DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _MCRID_ECC_LIMIT_ERR, _ENABLE)      |
        DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _EXTMCRID_ECC_LIMIT_ERR, _ENABLE)   |
        DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _RAM_ECC_LIMIT_ERR, _ENABLE)        |
        DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _INVALID_MCRID_ERR, _ENABLE);
    // NOTE: _MC_TRIGGER_ERR is debug-use only
}
262 
//
// Build and cache the INGRESS block's fatal/nonfatal error-report enable
// masks (report-enable registers 0 and 1) in per-chip state.  Only
// chip_device->intr_mask is written here; no hardware access occurs.
//
static void
_nvswitch_initialize_ingress_interrupts
(
    nvswitch_device *device
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);

    // Report-enable 0, fatal: decode errors, parity, and ECC DBEs
    chip_device->intr_mask.ingress[0].fatal =
        DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _CMDDECODEERR, _ENABLE)              |
        DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _EXTAREMAPTAB_ECC_DBE_ERR, _ENABLE)  |
        DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _NCISOC_HDR_ECC_DBE_ERR, _ENABLE)    |
        DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _INVALIDVCSET, _ENABLE)              |
        DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _REMAPTAB_ECC_DBE_ERR, _ENABLE)      |
        DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _RIDTAB_ECC_DBE_ERR, _ENABLE)        |
        DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _RLANTAB_ECC_DBE_ERR, _ENABLE)       |
        DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _NCISOC_PARITY_ERR, _ENABLE)         |
        DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _EXTBREMAPTAB_ECC_DBE_ERR, _ENABLE)  |
        DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _MCREMAPTAB_ECC_DBE_ERR, _ENABLE);

    // Report-enable 0, nonfatal: ACL/bounds/config failures and ECC limits
    chip_device->intr_mask.ingress[0].nonfatal =
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _REQCONTEXTMISMATCHERR, _ENABLE)    |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _ACLFAIL, _ENABLE)                  |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _NCISOC_HDR_ECC_LIMIT_ERR, _ENABLE) |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _ADDRBOUNDSERR, _ENABLE)            |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RIDTABCFGERR, _ENABLE)             |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RLANTABCFGERR, _ENABLE)            |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _REMAPTAB_ECC_LIMIT_ERR, _ENABLE)   |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RIDTAB_ECC_LIMIT_ERR, _ENABLE)     |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RLANTAB_ECC_LIMIT_ERR, _ENABLE)    |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _ADDRTYPEERR, _ENABLE)              |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTAREMAPTAB_INDEX_ERR, _ENABLE)   |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTBREMAPTAB_INDEX_ERR, _ENABLE)   |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _MCREMAPTAB_INDEX_ERR, _ENABLE)     |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTAREMAPTAB_REQCONTEXTMISMATCHERR, _ENABLE) |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTBREMAPTAB_REQCONTEXTMISMATCHERR, _ENABLE) |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _MCREMAPTAB_REQCONTEXTMISMATCHERR, _ENABLE)   |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTAREMAPTAB_ACLFAIL, _ENABLE)     |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTBREMAPTAB_ACLFAIL, _ENABLE)     |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _MCREMAPTAB_ACLFAIL, _ENABLE)       |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTAREMAPTAB_ADDRBOUNDSERR, _ENABLE) |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTBREMAPTAB_ADDRBOUNDSERR, _ENABLE) |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _MCREMAPTAB_ADDRBOUNDSERR, _ENABLE);

    // Report-enable 1 carries no fatal sources
    chip_device->intr_mask.ingress[1].fatal = 0;

    // Report-enable 1, nonfatal: remap-table ECC limits and multicast errors
    chip_device->intr_mask.ingress[1].nonfatal =
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _EXTAREMAPTAB_ECC_LIMIT_ERR, _ENABLE) |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _EXTBREMAPTAB_ECC_LIMIT_ERR, _ENABLE) |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCREMAPTAB_ECC_LIMIT_ERR, _ENABLE)   |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCCMDTOUCADDRERR, _ENABLE)           |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _READMCREFLECTMEMERR, _ENABLE)        |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _EXTAREMAPTAB_ADDRTYPEERR, _ENABLE)   |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _EXTBREMAPTAB_ADDRTYPEERR, _ENABLE)   |
        DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCREMAPTAB_ADDRTYPEERR, _ENABLE);
}
319 
//
// Build and cache the EGRESS block's fatal/nonfatal error-report enable
// masks (report-enable registers 0 and 1) in per-chip state.  Only
// chip_device->intr_mask is written here; no hardware access occurs.
//
static void
_nvswitch_initialize_egress_interrupts
(
    nvswitch_device *device
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);

    // Report-enable 0, fatal: buffer/routing/credit errors, parity, ECC DBEs
    chip_device->intr_mask.egress[0].fatal =
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _EGRESSBUFERR, _ENABLE)                 |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _PKTROUTEERR, _ENABLE)                  |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _SEQIDERR, _ENABLE)                     |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NXBAR_HDR_ECC_DBE_ERR, _ENABLE)        |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _RAM_OUT_HDR_ECC_DBE_ERR, _ENABLE)      |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NCISOCCREDITOVFL, _ENABLE)             |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _REQTGTIDMISMATCHERR, _ENABLE)          |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _RSPREQIDMISMATCHERR, _ENABLE)          |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NXBAR_HDR_PARITY_ERR, _ENABLE)         |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NCISOC_CREDIT_PARITY_ERR, _ENABLE)     |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NXBAR_FLITTYPE_MISMATCH_ERR, _ENABLE)  |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _CREDIT_TIME_OUT_ERR, _ENABLE)          |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _INVALIDVCSET_ERR, _ENABLE)             |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NXBAR_SIDEBAND_PD_PARITY_ERR, _ENABLE) |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _URRSPERR, _ENABLE)                     |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _HWRSPERR, _ENABLE);

    // Report-enable 0, nonfatal: ECC limits and priv-response errors;
    // the reserved (_RFU) field is explicitly left disabled
    chip_device->intr_mask.egress[0].nonfatal =
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _NXBAR_HDR_ECC_LIMIT_ERR, _ENABLE)     |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RAM_OUT_HDR_ECC_LIMIT_ERR, _ENABLE)   |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _PRIVRSPERR, _ENABLE)                  |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RFU, _DISABLE);

    // Report-enable 1, fatal: multicast/reduction control-store ECC DBEs
    chip_device->intr_mask.egress[1].fatal =

        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_1, _MCRSPCTRLSTORE_ECC_DBE_ERR, _ENABLE)              |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_1, _RBCTRLSTORE_ECC_DBE_ERR, _ENABLE)                 |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_1, _MCREDSGT_ECC_DBE_ERR, _ENABLE)                    |
        DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_1, _MCRSP_RAM_HDR_ECC_DBE_ERR, _ENABLE);

    // Report-enable 1, nonfatal: reduction-path errors.  NOTE(review): some
    // _DBE (double-bit) sources are routed through the nonfatal mask here —
    // confirm this severity assignment is intentional.
    chip_device->intr_mask.egress[1].nonfatal =
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _NXBAR_REDUCTION_HDR_ECC_LIMIT_ERR, _ENABLE)       |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCRSPCTRLSTORE_ECC_LIMIT_ERR, _ENABLE)            |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _RBCTRLSTORE_ECC_LIMIT_ERR, _ENABLE)               |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCREDSGT_ECC_LIMIT_ERR, _ENABLE)                  |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCREDBUF_ECC_LIMIT_ERR, _ENABLE)                  |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCRSP_RAM_HDR_ECC_LIMIT_ERR, _ENABLE)             |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _NXBAR_REDUCTION_HDR_ECC_DBE_ERR, _ENABLE)         |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _NXBAR_REDUCTION_HDR_PARITY_ERR, _ENABLE)          |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _NXBAR_REDUCTION_FLITTYPE_MISMATCH_ERR, _ENABLE)   |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCREDBUF_ECC_DBE_ERR, _ENABLE)                    |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCRSP_CNT_ERR, _ENABLE)                           |
        DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_1, _RBRSP_CNT_ERR, _ENABLE);
}
373 
//
// Build and cache the TSTATE block's fatal/nonfatal error-report enable
// masks in per-chip state.  Only chip_device->intr_mask is written here.
//
static void
_nvswitch_initialize_tstate_interrupts
(
    nvswitch_device *device
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);

    // Fatal: tagpool/crumbstore buffer and ECC DBE errors, ATO, CAM response
    chip_device->intr_mask.tstate.fatal =
        DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _TAGPOOLBUFERR, _ENABLE)              |
        DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _TAGPOOL_ECC_DBE_ERR, _ENABLE)        |
        DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _CRUMBSTOREBUFERR, _ENABLE)           |
        DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _CRUMBSTORE_ECC_DBE_ERR, _ENABLE)     |
        DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _ATO_ERR, _ENABLE)                    |
        DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _CAMRSP_ERR, _ENABLE);

    // Nonfatal: ECC limit (correctable-threshold) errors only
    chip_device->intr_mask.tstate.nonfatal =
        DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _TAGPOOL_ECC_LIMIT_ERR, _ENABLE)      |
        DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _CRUMBSTORE_ECC_LIMIT_ERR, _ENABLE);
}
394 
//
// Build and cache the SOURCETRACK block's fatal/nonfatal error-report
// enable masks in per-chip state.  Only chip_device->intr_mask is written.
//
static void
_nvswitch_initialize_sourcetrack_interrupts
(
    nvswitch_device *device
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);

    // Fatal: crumbstore ECC DBE, duplicate tag, invalid responses, timeout
    chip_device->intr_mask.sourcetrack.fatal =
        DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, _ENABLE) |
        DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _DUP_CREQ_TCEN0_TAG_ERR, _ENABLE)     |
        DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _INVALID_TCEN0_RSP_ERR, _ENABLE)      |
        DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _INVALID_TCEN1_RSP_ERR, _ENABLE)      |
        DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _SOURCETRACK_TIME_OUT_ERR, _ENABLE);

    // Nonfatal: crumbstore ECC limit error only
    chip_device->intr_mask.sourcetrack.nonfatal =
        DRF_DEF(_SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, _ENABLE);
}
413 
//
// Build and cache the MULTICASTTSTATE block's fatal/nonfatal error-report
// enable masks in per-chip state.  Only chip_device->intr_mask is written.
//
static void
_nvswitch_initialize_multicast_tstate_interrupts
(
    nvswitch_device *device
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);

    // Fatal: ECC DBE and crumbstore buffer-overwrite errors
    chip_device->intr_mask.mc_tstate.fatal =
        DRF_DEF(_MULTICASTTSTATE, _ERR_FATAL_REPORT_EN_0, _TAGPOOL_ECC_DBE_ERR, _ENABLE)             |
        DRF_DEF(_MULTICASTTSTATE, _ERR_FATAL_REPORT_EN_0, _CRUMBSTORE_BUF_OVERWRITE_ERR, _ENABLE)    |
        DRF_DEF(_MULTICASTTSTATE, _ERR_FATAL_REPORT_EN_0, _CRUMBSTORE_ECC_DBE_ERR, _ENABLE);

    // Nonfatal: ECC limit errors plus multicast timeout (MCTO)
    chip_device->intr_mask.mc_tstate.nonfatal =
        DRF_DEF(_MULTICASTTSTATE, _ERR_NON_FATAL_REPORT_EN_0, _TAGPOOL_ECC_LIMIT_ERR, _ENABLE)       |
        DRF_DEF(_MULTICASTTSTATE, _ERR_NON_FATAL_REPORT_EN_0, _CRUMBSTORE_ECC_LIMIT_ERR, _ENABLE)    |
        DRF_DEF(_MULTICASTTSTATE, _ERR_NON_FATAL_REPORT_EN_0, _CRUMBSTORE_MCTO_ERR, _ENABLE);
}
432 
//
// Build and cache the REDUCTIONTSTATE block's fatal/nonfatal error-report
// enable masks in per-chip state.  Only chip_device->intr_mask is written.
//
static void
_nvswitch_initialize_reduction_tstate_interrupts
(
    nvswitch_device *device
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);

    // Fatal: ECC DBE and crumbstore buffer-overwrite errors
    chip_device->intr_mask.red_tstate.fatal =
        DRF_DEF(_REDUCTIONTSTATE, _ERR_FATAL_REPORT_EN_0, _TAGPOOL_ECC_DBE_ERR, _ENABLE)            |
        DRF_DEF(_REDUCTIONTSTATE, _ERR_FATAL_REPORT_EN_0, _CRUMBSTORE_BUF_OVERWRITE_ERR, _ENABLE)   |
        DRF_DEF(_REDUCTIONTSTATE, _ERR_FATAL_REPORT_EN_0, _CRUMBSTORE_ECC_DBE_ERR, _ENABLE);

    // Nonfatal: ECC limit errors plus reduction timeout (RTO)
    chip_device->intr_mask.red_tstate.nonfatal =
        DRF_DEF(_REDUCTIONTSTATE, _ERR_NON_FATAL_REPORT_EN_0, _TAGPOOL_ECC_LIMIT_ERR, _ENABLE)      |
        DRF_DEF(_REDUCTIONTSTATE, _ERR_NON_FATAL_REPORT_EN_0, _CRUMBSTORE_ECC_LIMIT_ERR, _ENABLE)   |
        DRF_DEF(_REDUCTIONTSTATE, _ERR_NON_FATAL_REPORT_EN_0, _CRUMBSTORE_RTO_ERR, _ENABLE);
}
451 
//
// Initialize NPORT interrupt state by caching the error-report masks for
// every NPORT sub-unit (ROUTE, INGRESS, EGRESS, TSTATE, SOURCETRACK,
// MULTICASTTSTATE, REDUCTIONTSTATE).  The common NPORT error-control
// register write has been delegated to SOE (see #if 0 block below).
//
void
_nvswitch_initialize_nport_interrupts_ls10
(
    nvswitch_device *device
)
{
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
    NvU32 val;

    val =
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 1) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 1) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 1);
    NVSWITCH_NPORT_BCAST_WR32_LS10(device, _NPORT, _ERR_CONTROL_COMMON_NPORT, val);
#endif // 0

    // Cache per-unit interrupt masks in chip_device->intr_mask
    _nvswitch_initialize_route_interrupts(device);
    _nvswitch_initialize_ingress_interrupts(device);
    _nvswitch_initialize_egress_interrupts(device);
    _nvswitch_initialize_tstate_interrupts(device);
    _nvswitch_initialize_sourcetrack_interrupts(device);
    _nvswitch_initialize_multicast_tstate_interrupts(device);
    _nvswitch_initialize_reduction_tstate_interrupts(device);
}
477 
//
// Build and cache the NXBAR TILE and TILEOUT fatal interrupt-enable masks
// in per-chip state.  The actual register writes were moved to SOE (see
// the #if 0 blocks below); this function only populates
// chip_device->intr_mask.  Neither unit has nonfatal sources enabled.
//
void
_nvswitch_initialize_nxbar_interrupts_ls10
(
    nvswitch_device *device
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NvU32 report_fatal;

    // TILE: buffer over/underflow, credit errors, malformed ingress packets
    report_fatal =
        DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_BUFFER_OVERFLOW, 1)     |
        DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_BUFFER_UNDERFLOW, 1)    |
        DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _EGRESS_CREDIT_OVERFLOW, 1)      |
        DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _EGRESS_CREDIT_UNDERFLOW, 1)     |
        DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_NON_BURSTY_PKT, 1)      |
        DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_NON_STICKY_PKT, 1)      |
        DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_BURST_GT_9_DATA_VC, 1)  |
        DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_PKT_INVALID_DST, 1)     |
        DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_PKT_PARITY_ERROR, 1)    |
        DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_SIDEBAND_PARITY_ERROR, 1) |
        DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_REDUCTION_PKT_ERROR, 1);

// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
    NVSWITCH_BCAST_WR32_LS10(device, NXBAR, _NXBAR_TILE, _ERR_FATAL_INTR_EN, report_fatal);
#endif // 0

    chip_device->intr_mask.tile.fatal = report_fatal;
    chip_device->intr_mask.tile.nonfatal = 0;

    // TILEOUT: same class of errors on the tile-output side
    report_fatal =
        DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _INGRESS_BUFFER_OVERFLOW, 1)     |
        DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _INGRESS_BUFFER_UNDERFLOW, 1)    |
        DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _EGRESS_CREDIT_OVERFLOW, 1)      |
        DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _EGRESS_CREDIT_UNDERFLOW, 1)     |
        DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _INGRESS_NON_BURSTY_PKT, 1)      |
        DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _INGRESS_NON_STICKY_PKT, 1)      |
        DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _INGRESS_BURST_GT_9_DATA_VC, 1)  |
        DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _EGRESS_CDT_PARITY_ERROR, 1);

// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
    NVSWITCH_BCAST_WR32_LS10(device, NXBAR, _NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, report_fatal);
#endif // 0

    chip_device->intr_mask.tileout.fatal = report_fatal;
    chip_device->intr_mask.tileout.nonfatal = 0;
}
526 
527 /*
528  * @brief Service MINION Falcon interrupts on the requested interrupt tree
 *        Falcon Interrupts are a little unique in how they are handled:
530  *        IRQSTAT is used to read in interrupt status from FALCON
531  *        IRQMASK is used to read in mask of interrupts
532  *        IRQDEST is used to read in enabled interrupts that are routed to the HOST
533  *
534  *        IRQSTAT & IRQMASK gives the pending interrupting on this minion
535  *
536  * @param[in] device   MINION on this device
537  * @param[in] instance MINION instance
538  *
539  */
540 NvlStatus
541 nvswitch_minion_service_falcon_interrupts_ls10
542 (
543     nvswitch_device *device,
544     NvU32           instance
545 )
546 {
547     ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
548     NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
549     NvU32 pending, bit, unhandled, intr, link;
550 
551     link = instance * NVSWITCH_LINKS_PER_MINION_LS10;
552     report.raw_pending = NVSWITCH_MINION_RD32_LS10(device, instance, _CMINION, _FALCON_IRQSTAT);
553     report.raw_enable = chip_device->intr_minion_dest;
554     report.mask = NVSWITCH_MINION_RD32_LS10(device, instance, _CMINION, _FALCON_IRQMASK);
555 
556     pending = report.raw_pending & report.mask;
557 
558     if (pending == 0)
559     {
560         return -NVL_NOT_FOUND;
561     }
562 
563     unhandled = pending;
564 
565     bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _WDTMR, 1);
566     if (nvswitch_test_flags(pending, bit))
567     {
568         NVSWITCH_REPORT_FATAL(_HW_MINION_WATCHDOG, "MINION Watchdog timer ran out", NV_TRUE);
569         nvswitch_clear_flags(&unhandled, bit);
570     }
571 
572     bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _HALT, 1);
573     if (nvswitch_test_flags(pending, bit))
574     {
575         NVSWITCH_REPORT_FATAL(_HW_MINION_HALT, "MINION HALT", NV_TRUE);
576         nvswitch_clear_flags(&unhandled, bit);
577     }
578 
579     bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _EXTERR, 1);
580     if (nvswitch_test_flags(pending, bit))
581     {
582         NVSWITCH_REPORT_FATAL(_HW_MINION_EXTERR, "MINION EXTERR", NV_TRUE);
583         nvswitch_clear_flags(&unhandled, bit);
584     }
585 
586     bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _SWGEN0, 1);
587     if (nvswitch_test_flags(pending, bit))
588     {
589         NVSWITCH_PRINT(device, INFO,
590                       "%s: Received MINION Falcon SWGEN0 interrupt on MINION %d.\n",
591                       __FUNCTION__, instance);
592         nvswitch_clear_flags(&unhandled, bit);
593     }
594 
595     bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _SWGEN1, 1);
596     if (nvswitch_test_flags(pending, bit))
597     {
598         NVSWITCH_PRINT(device, INFO,
599                        "%s: Received MINION Falcon SWGEN1 interrupt on MINION %d.\n",
600                       __FUNCTION__, instance);
601         nvswitch_clear_flags(&unhandled, bit);
602     }
603 
604     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
605 
606     if (device->link[link].fatal_error_occurred)
607     {
608         intr = NVSWITCH_MINION_RD32_LS10(device, instance, _MINION, _MINION_INTR_STALL_EN);
609         intr = FLD_SET_DRF(_MINION, _MINION_INTR_STALL_EN, _FATAL, _DISABLE, intr);
610         intr = FLD_SET_DRF(_MINION, _MINION_INTR_STALL_EN, _FALCON_STALL, _DISABLE, intr);
611         intr = FLD_SET_DRF(_MINION, _MINION_INTR_STALL_EN, _FATAL, _DISABLE, intr);
612         intr = FLD_SET_DRF(_MINION, _MINION_INTR_STALL_EN, _NONFATAL, _DISABLE, intr);
613         NVSWITCH_MINION_WR32_LS10(device, instance, _MINION, _MINION_INTR_STALL_EN, intr);
614     }
615 
616     // Write to IRQSCLR to clear status of interrupt
617     NVSWITCH_MINION_WR32_LS10(device, instance, _CMINION, _FALCON_IRQSCLR, pending);
618 
619     if (unhandled != 0)
620     {
621         return -NVL_MORE_PROCESSING_REQUIRED;
622     }
623 
624     return NVL_SUCCESS;
625 }
626 
627 /*
628  * @Brief : Send priv ring command and wait for completion
629  *
630  * @Description :
631  *
632  * @param[in] device        a reference to the device to initialize
633  * @param[in] cmd           encoded priv ring command
634  */
635 static NvlStatus
636 _nvswitch_ring_master_cmd_ls10
637 (
638     nvswitch_device *device,
639     NvU32 cmd
640 )
641 {
642     NvU32 value;
643     NVSWITCH_TIMEOUT timeout;
644     NvBool           keepPolling;
645 
646     NVSWITCH_ENG_WR32(device, PRI_MASTER_RS, , 0, _PPRIV_MASTER, _RING_COMMAND, cmd);
647 
648     nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout);
649     do
650     {
651         keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
652 
653         value = NVSWITCH_ENG_RD32(device, PRI_MASTER_RS, , 0, _PPRIV_MASTER, _RING_COMMAND);
654         if (FLD_TEST_DRF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _NO_CMD, value))
655         {
656             break;
657         }
658 
659         nvswitch_os_sleep(1);
660     }
661     while (keepPolling);
662 
663     if (!FLD_TEST_DRF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _NO_CMD, value))
664     {
665         NVSWITCH_PRINT(device, ERROR,
666             "%s: Timeout waiting for RING_COMMAND == NO_CMD (cmd=0x%x).\n",
667             __FUNCTION__, cmd);
668         return -NVL_INITIALIZATION_TOTAL_FAILURE;
669     }
670 
671     return NVL_SUCCESS;
672 }
673 
/*
 * @Brief : Service pending PRI (priv ring) write-error interrupts.
 *
 * Reads the ring master's RING_INTERRUPT_STATUS0 and, for each write-error
 * source that is pending (SYS hub, SYSB hub, and each per-PRT hub), reads
 * the hub's PRIV_ERROR_{ADR,WRDAT,INFO,CODE} registers, reports the error
 * as nonfatal, and clears the corresponding bit in the local 'pending'
 * copy.  Finally the interrupt is ACK'd back to the ring master.
 *
 * @param[in] device        a reference to the device
 *
 * Returns NVL_SUCCESS when all pending bits were recognized (even if the
 * final ACK timed out), -NVL_NOT_FOUND when no interrupt was pending, or
 * -NVL_MORE_PROCESSING_REQUIRED when unexpected bits remain set.
 */
static NvlStatus
_nvswitch_service_priv_ring_ls10
(
    nvswitch_device *device
)
{
    NvU32 pending, i;
    NVSWITCH_PRI_ERROR_LOG_TYPE pri_error;
    NvlStatus status = NVL_SUCCESS;

    pending = NVSWITCH_ENG_RD32(device, PRI_MASTER_RS, , 0, _PPRIV_MASTER, _RING_INTERRUPT_STATUS0);
    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    //
    // SYS
    //

    if (FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
            _GBL_WRITE_ERROR_SYS, 1, pending))
    {
        // Capture the faulting address/data/info/code from the SYS hub.
        pri_error.addr = NVSWITCH_ENG_RD32(device, SYS_PRI_HUB, , 0, _PPRIV_SYS, _PRIV_ERROR_ADR);
        pri_error.data = NVSWITCH_ENG_RD32(device, SYS_PRI_HUB, , 0, _PPRIV_SYS, _PRIV_ERROR_WRDAT);
        pri_error.info = NVSWITCH_ENG_RD32(device, SYS_PRI_HUB, , 0, _PPRIV_SYS, _PRIV_ERROR_INFO);
        pri_error.code = NVSWITCH_ENG_RD32(device, SYS_PRI_HUB, , 0, _PPRIV_SYS, _PRIV_ERROR_CODE);

        NVSWITCH_REPORT_PRI_ERROR_NONFATAL(_HW_HOST_PRIV_ERROR, "PRI WRITE SYS error", NVSWITCH_PPRIV_WRITE_SYS, 0, pri_error);

        NVSWITCH_PRINT(device, ERROR,
            "SYS PRI write error addr: 0x%08x data: 0x%08x info: 0x%08x code: 0x%08x\n",
            pri_error.addr, pri_error.data,
            pri_error.info, pri_error.code);

        // Mark this source handled by clearing its bit in the local copy.
        pending = FLD_SET_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
            _GBL_WRITE_ERROR_SYS, 0, pending);
    }

    //
    // SYSB
    //

    if (FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
            _GBL_WRITE_ERROR_SYSB, 1, pending))
    {
        // SYSB hub exposes the same _PPRIV_SYS error register layout.
        pri_error.addr = NVSWITCH_ENG_RD32(device, SYSB_PRI_HUB, , 0, _PPRIV_SYS, _PRIV_ERROR_ADR);
        pri_error.data = NVSWITCH_ENG_RD32(device, SYSB_PRI_HUB, , 0, _PPRIV_SYS, _PRIV_ERROR_WRDAT);
        pri_error.info = NVSWITCH_ENG_RD32(device, SYSB_PRI_HUB, , 0, _PPRIV_SYS, _PRIV_ERROR_INFO);
        pri_error.code = NVSWITCH_ENG_RD32(device, SYSB_PRI_HUB, , 0, _PPRIV_SYS, _PRIV_ERROR_CODE);

        NVSWITCH_REPORT_PRI_ERROR_NONFATAL(_HW_HOST_PRIV_ERROR, "PRI WRITE SYSB error", NVSWITCH_PPRIV_WRITE_SYS, 1, pri_error);

        NVSWITCH_PRINT(device, ERROR,
            "SYSB PRI write error addr: 0x%08x data: 0x%08x info: 0x%08x code: 0x%08x\n",
            pri_error.addr, pri_error.data,
            pri_error.info, pri_error.code);

        pending = FLD_SET_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
            _GBL_WRITE_ERROR_SYSB, 0, pending);
    }

    //
    // per-PRT
    //

    // _GBL_WRITE_ERROR_FBP is a bit-vector field: one bit per PRT hub.
    for (i = 0; i < NUM_PRT_PRI_HUB_ENGINE_LS10; i++)
    {
        if (DRF_VAL(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
            _GBL_WRITE_ERROR_FBP, pending) & NVBIT(i))
        {
            pri_error.addr = NVSWITCH_ENG_RD32(device, PRT_PRI_HUB, , i, _PPRIV_PRT, _PRIV_ERROR_ADR);
            pri_error.data = NVSWITCH_ENG_RD32(device, PRT_PRI_HUB, , i, _PPRIV_PRT, _PRIV_ERROR_WRDAT);
            pri_error.info = NVSWITCH_ENG_RD32(device, PRT_PRI_HUB, , i, _PPRIV_PRT, _PRIV_ERROR_INFO);
            pri_error.code = NVSWITCH_ENG_RD32(device, PRT_PRI_HUB, , i, _PPRIV_PRT, _PRIV_ERROR_CODE);

            NVSWITCH_REPORT_PRI_ERROR_NONFATAL(_HW_HOST_PRIV_ERROR, "PRI WRITE PRT error", NVSWITCH_PPRIV_WRITE_PRT, i, pri_error);

            NVSWITCH_PRINT(device, ERROR,
                "PRT%d PRI write error addr: 0x%08x data: 0x%08x info: 0x%08x code: 0x%08x\n",
                i, pri_error.addr, pri_error.data, pri_error.info, pri_error.code);

            pending &= ~DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
                _GBL_WRITE_ERROR_FBP, NVBIT(i));
        }
    }

    // Anything still set in 'pending' is a source we do not know how to handle.
    if (pending != 0)
    {
        NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_PRIV_ERROR,
            "Fatal, Unexpected PRI error\n");
        NVSWITCH_LOG_FATAL_DATA(device, _HW, _HW_HOST_PRIV_ERROR, 2, 0, NV_FALSE, &pending);

        NVSWITCH_PRINT(device, ERROR,
            "Unexpected PRI error 0x%08x\n", pending);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    // acknowledge the interrupt to the ringmaster
    status = _nvswitch_ring_master_cmd_ls10(device,
        DRF_DEF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _ACK_INTERRUPT));
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR, "Timeout ACK'ing PRI error\n");
        //
        // Don't return error code -- there is nothing kernel SW can do about it if ACK failed.
        // Likely it is PLM protected and SOE needs to handle it.
        //
    }

    return NVL_SUCCESS;
}
786 
787 static NvlStatus
788 _nvswitch_collect_nport_error_info_ls10
789 (
790     nvswitch_device    *device,
791     NvU32               link,
792     NVSWITCH_RAW_ERROR_LOG_TYPE *data,
793     NvU32               *idx,
794     NvU32               register_start,
795     NvU32               register_end
796 )
797 {
798     NvU32 register_block_size;
799     NvU32 i = *idx;
800 
801     if ((register_start > register_end) ||
802         (register_start % sizeof(NvU32) != 0) ||
803         (register_end % sizeof(NvU32) != 0))
804     {
805         return -NVL_BAD_ARGS;
806     }
807 
808     register_block_size = (register_end - register_start)/sizeof(NvU32) + 1;
809     if ((i + register_block_size > NVSWITCH_RAW_ERROR_LOG_DATA_SIZE) ||
810         (register_block_size > NVSWITCH_RAW_ERROR_LOG_DATA_SIZE))
811     {
812         return -NVL_BAD_ARGS;
813     }
814 
815     do
816     {
817         data->data[i] = NVSWITCH_ENG_OFF_RD32(device, NPORT, , link, register_start);
818         register_start += sizeof(NvU32);
819         i++;
820 
821     }
822     while (register_start <= register_end);
823 
824     *idx = i;
825     return NVL_SUCCESS;
826 }
827 
828 static void
829 _nvswitch_collect_error_info_ls10
830 (
831     nvswitch_device    *device,
832     NvU32               link,
833     NvU32               collect_flags,  // NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_*
834     NVSWITCH_RAW_ERROR_LOG_TYPE *data
835 )
836 {
837     NvU32 val;
838     NvU32 i = 0;
839     NvlStatus status = NVL_SUCCESS;
840 
841     //
842     // The requested data 'collect_flags' is captured, if valid.
843     // if the error log buffer fills, then the currently captured data block
844     // could be truncated and subsequent blocks will be skipped.
845     // The 'flags' field in the log structure describes which blocks are
846     // actually captured.
847     // Captured blocks are packed, in order.
848     //
849 
850     data->flags = 0;
851 
852     // ROUTE
853     if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME)
854     {
855         status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
856                      NV_ROUTE_ERR_TIMESTAMP_LOG,
857                      NV_ROUTE_ERR_TIMESTAMP_LOG);
858         if (status == NVL_SUCCESS)
859         {
860             data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME;
861             NVSWITCH_PRINT(device, INFO,
862                 "ROUTE: TIMESTAMP: 0x%08x\n", data->data[i-1]);
863         }
864     }
865 
866     val = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_HEADER_LOG_VALID);
867     if (FLD_TEST_DRF_NUM(_ROUTE, _ERR_HEADER_LOG_VALID, _HEADERVALID0, 1, val))
868     {
869         if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_MISC)
870         {
871             status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
872                          NV_ROUTE_ERR_MISC_LOG_0,
873                          NV_ROUTE_ERR_MISC_LOG_0);
874             if (status == NVL_SUCCESS)
875             {
876                 data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_MISC;
877                 NVSWITCH_PRINT(device, INFO,
878                     "ROUTE: MISC: 0x%08x\n", data->data[i-1]);
879             }
880         }
881 
882         if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_HDR)
883         {
884             status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
885                          NV_ROUTE_ERR_HEADER_LOG_4,
886                          NV_ROUTE_ERR_HEADER_LOG_10);
887             if (status == NVL_SUCCESS)
888             {
889                 data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_HDR;
890                 NVSWITCH_PRINT(device, INFO,
891                     "ROUTE: HEADER: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x,\n",
892                     data->data[i-8], data->data[i-7], data->data[i-6], data->data[i-5],
893                     data->data[i-4], data->data[i-3], data->data[i-2], data->data[i-1]);
894             }
895         }
896     }
897 
898     // INGRESS
899     if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_TIME)
900     {
901         status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
902                      NV_INGRESS_ERR_TIMESTAMP_LOG,
903                      NV_INGRESS_ERR_TIMESTAMP_LOG);
904         if (status == NVL_SUCCESS)
905         {
906             data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_TIME;
907             NVSWITCH_PRINT(device, INFO,
908                 "INGRESS: TIMESTAMP: 0x%08x\n", data->data[i-1]);
909         }
910     }
911 
912     val = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_HEADER_LOG_VALID);
913     if (FLD_TEST_DRF_NUM(_INGRESS, _ERR_HEADER_LOG_VALID, _HEADERVALID0, 1, val))
914     {
915         if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_MISC)
916         {
917             status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
918                          NV_INGRESS_ERR_MISC_LOG_0,
919                          NV_INGRESS_ERR_MISC_LOG_0);
920             if (status == NVL_SUCCESS)
921             {
922                 data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_MISC;
923                 NVSWITCH_PRINT(device, INFO,
924                     "INGRESS: MISC: 0x%08x\n", data->data[i-1]);
925             }
926         }
927 
928         if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_HDR)
929         {
930             status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
931                          NV_INGRESS_ERR_HEADER_LOG_4,
932                          NV_INGRESS_ERR_HEADER_LOG_9);
933             if (status == NVL_SUCCESS)
934             {
935                 data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_HDR;
936                 NVSWITCH_PRINT(device, INFO,
937                     "INGRESS: HEADER: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x,\n",
938                     data->data[i-7], data->data[i-6], data->data[i-5], data->data[i-4],
939                     data->data[i-3], data->data[i-2], data->data[i-1]);
940             }
941         }
942     }
943 
944     // EGRESS
945     if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_TIME)
946     {
947         status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
948                      NV_EGRESS_ERR_TIMESTAMP_LOG,
949                      NV_EGRESS_ERR_TIMESTAMP_LOG);
950         if (status == NVL_SUCCESS)
951         {
952             data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_TIME;
953             NVSWITCH_PRINT(device, INFO,
954                 "EGRESS: TIMESTAMP: 0x%08x\n", data->data[i-1]);
955         }
956     }
957 
958     val = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_HEADER_LOG_VALID);
959     if (FLD_TEST_DRF_NUM(_EGRESS, _ERR_HEADER_LOG_VALID, _HEADERVALID0, 1, val))
960     {
961         if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MISC)
962         {
963             status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
964                          NV_EGRESS_ERR_MISC_LOG_0,
965                          NV_EGRESS_ERR_MISC_LOG_0);
966             if (status == NVL_SUCCESS)
967             {
968                 data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MISC;
969                 NVSWITCH_PRINT(device, INFO,
970                     "EGRESS: MISC: 0x%08x\n", data->data[i-1]);
971             }
972         }
973 
974         if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_HDR)
975         {
976             status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
977                          NV_EGRESS_ERR_HEADER_LOG_4,
978                          NV_EGRESS_ERR_HEADER_LOG_10);
979             if (status == NVL_SUCCESS)
980             {
981                 data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_HDR;
982                 NVSWITCH_PRINT(device, INFO,
983                     "EGRESS: HEADER: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
984                     data->data[i-7], data->data[i-6], data->data[i-5], data->data[i-4],
985                     data->data[i-3], data->data[i-2], data->data[i-1]);
986             }
987         }
988     }
989 
990     if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_TIME)
991     {
992         status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
993                      NV_EGRESS_MC_ERR_TIMESTAMP_LOG,
994                      NV_EGRESS_MC_ERR_TIMESTAMP_LOG);
995         if (status == NVL_SUCCESS)
996         {
997             data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_TIME;
998             NVSWITCH_PRINT(device, INFO,
999                 "EGRESS: TIME MC: 0x%08x\n", data->data[i-1]);
1000         }
1001     }
1002 
1003     val = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _MC_ERR_HEADER_LOG_VALID);
1004     if (FLD_TEST_DRF_NUM(_EGRESS, _MC_ERR_HEADER_LOG_VALID, _HEADERVALID0, 1, val))
1005     {
1006         if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_MISC)
1007         {
1008             status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
1009                          NV_EGRESS_MC_ERR_MISC_LOG_0,
1010                          NV_EGRESS_MC_ERR_MISC_LOG_0);
1011             if (status == NVL_SUCCESS)
1012             {
1013                 data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_MISC;
1014                 NVSWITCH_PRINT(device, INFO,
1015                     "EGRESS: MISC MC: 0x%08x\n", data->data[i-1]);
1016             }
1017         }
1018 
1019         if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_HDR)
1020         {
1021             status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
1022                          NV_EGRESS_MC_ERR_HEADER_LOG_4,
1023                          NV_EGRESS_MC_ERR_HEADER_LOG_10);
1024             if (status == NVL_SUCCESS)
1025             {
1026                 data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_HDR;
1027                 NVSWITCH_PRINT(device, INFO,
1028                     "EGRESS MC: HEADER: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1029                     data->data[i-7], data->data[i-6], data->data[i-5], data->data[i-4],
1030                     data->data[i-3], data->data[i-2], data->data[i-1]);
1031             }
1032         }
1033     }
1034 
1035     if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_MC_TIME)
1036     {
1037         status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
1038                      NV_MULTICASTTSTATE_ERR_TIMESTAMP_LOG,
1039                      NV_MULTICASTTSTATE_ERR_TIMESTAMP_LOG);
1040         if (status == NVL_SUCCESS)
1041         {
1042             data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_MC_TIME;
1043             NVSWITCH_PRINT(device, INFO,
1044                 "MC TSTATE MC: 0x%08x\n",
1045                 data->data[i-1]);
1046         }
1047     }
1048 
1049     if (collect_flags & NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_RED_TIME)
1050     {
1051         status = _nvswitch_collect_nport_error_info_ls10(device, link, data, &i,
1052                      NV_REDUCTIONTSTATE_ERR_TIMESTAMP_LOG,
1053                      NV_REDUCTIONTSTATE_ERR_TIMESTAMP_LOG);
1054         if (status == NVL_SUCCESS)
1055         {
1056             data->flags |= NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_RED_TIME;
1057             NVSWITCH_PRINT(device, INFO,
1058                 "MC TSTATE RED: 0x%08x\n",
1059                 data->data[i-1]);
1060         }
1061     }
1062 
1063     while (i < NVSWITCH_RAW_ERROR_LOG_DATA_SIZE)
1064     {
1065         data->data[i++] = 0;
1066     }
1067 }
1068 
/*
 * @Brief : Service fatal ROUTE interrupts on an NPORT link.
 *
 * Reads ROUTE _ERR_STATUS_0, masks it against the fatal report-enable
 * register and the driver's fatal interrupt mask, then handles each known
 * fatal source: collects the relevant error-log data, reports a contained
 * error, and (for ECC sources) logs an ECC event to the InfoROM.  Handled
 * bits are cleared from 'unhandled'; any bit left over trips
 * NVSWITCH_UNHANDLED_CHECK.
 *
 * @param[in] device        a reference to the device
 * @param[in] link          NPORT/link index being serviced
 *
 * Returns NVL_SUCCESS when all pending bits were handled, -NVL_NOT_FOUND
 * when no masked interrupt was pending, or -NVL_MORE_PROCESSING_REQUIRED
 * when unrecognized bits remain.
 */
static NvlStatus
_nvswitch_service_route_fatal_ls10
(
    nvswitch_device *device,
    NvU32            link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, contain, unhandled;
    NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
    INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};

    report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable & chip_device->intr_mask.route.fatal;
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;

    report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_FIRST_0);
    //
    // NOTE(review): 'contain' is not referenced directly below; it is
    // presumably consumed by the NVSWITCH_REPORT_CONTAIN macro expansion --
    // confirm before removing or renaming it.
    //
    contain = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_CONTAIN_EN_0);

    bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _ROUTEBUFERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
            &data);
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_ROUTEBUFERR, "route buffer over/underflow", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_ROUTEBUFERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _GLT_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // GLT ECC errors optionally latch the faulting address.
        NvBool bAddressValid = NV_FALSE;
        NvU32 address = 0;
        NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE,
                _ERR_GLT_ECC_ERROR_ADDRESS_VALID);

        if (FLD_TEST_DRF(_ROUTE_ERR_GLT, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
                         addressValid))
        {
            address = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE,
                                               _ERR_GLT_ECC_ERROR_ADDRESS);
            bAddressValid = NV_TRUE;
        }

        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_HDR,
            &data);
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_GLT_ECC_DBE_ERR, "route GLT DBE", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_GLT_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_ROUTE_GLT_ECC_DBE_ERR, link, bAddressValid,
            address, NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);
    }

    bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _PDCTRLPARERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_HDR,
            &data);
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_PDCTRLPARERR, "route parity", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_PDCTRLPARERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _NVS_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_HDR,
            &data);
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_NVS_ECC_DBE_ERR, "route incoming DBE", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_NVS_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_ROUTE_NVS_ECC_DBE_ERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt
        if (report.raw_pending & DRF_NUM(_ROUTE, _ERR_STATUS_0, _NVS_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_STATUS_0,
                DRF_NUM(_ROUTE, _ERR_STATUS_0, _NVS_ECC_LIMIT_ERR, 1));
        }
    }

    bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _CDTPARERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
            &data);
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_CDTPARERR, "route credit parity", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_CDTPARERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_ROUTE_CDTPARERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);
    }

    bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _MCRID_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_HDR,
            &data);
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_MCRID_ECC_DBE_ERR, "MC route ECC", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_MCRID_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_ROUTE_MCRID_ECC_DBE_ERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);
    }

    bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _EXTMCRID_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_HDR,
            &data);
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_EXTMCRID_ECC_DBE_ERR, "Extd MC route ECC", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_EXTMCRID_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_ROUTE_EXTMCRID_ECC_DBE_ERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);
    }

    bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _RAM_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_HDR,
            &data);
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_RAM_ECC_DBE_ERR, "route RAM ECC", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_RAM_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_ROUTE_RAM_ECC_DBE_ERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
    if (device->link[link].fatal_error_occurred)
    {
        // 'pending' is a subset of raw_enable, so XOR clears exactly those bits.
        NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
    }

    if (report.raw_first & report.mask)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_FIRST_0,
                report.raw_first & report.mask);
    }
    // Write-1-to-clear the serviced status bits.
    NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
1277 
1278 static NvlStatus
1279 _nvswitch_service_route_nonfatal_ls10
1280 (
1281     nvswitch_device *device,
1282     NvU32            link
1283 )
1284 {
1285     ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
1286     NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
1287     NvU32 pending, bit, unhandled;
1288     NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
1289     INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
1290 
1291     report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_STATUS_0);
1292     report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0);
1293     report.mask = report.raw_enable & chip_device->intr_mask.route.nonfatal;
1294     pending = report.raw_pending & report.mask;
1295 
1296     if (pending == 0)
1297     {
1298         return -NVL_NOT_FOUND;
1299     }
1300 
1301     unhandled = pending;
1302     report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_FIRST_0);
1303 
1304     bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _NOPORTDEFINEDERR, 1);
1305     if (nvswitch_test_flags(pending, bit))
1306     {
1307         _nvswitch_collect_error_info_ls10(device, link,
1308             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME |
1309             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_MISC |
1310             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_HDR,
1311             &data);
1312         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_ROUTE_NOPORTDEFINEDERR, "route undefined route");
1313         NVSWITCH_REPORT_DATA(_HW_NPORT_ROUTE_NOPORTDEFINEDERR, data);
1314         nvswitch_clear_flags(&unhandled, bit);
1315     }
1316 
1317     bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _INVALIDROUTEPOLICYERR, 1);
1318     if (nvswitch_test_flags(pending, bit))
1319     {
1320         _nvswitch_collect_error_info_ls10(device, link,
1321             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME |
1322             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_MISC |
1323             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_HDR,
1324             &data);
1325         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_ROUTE_INVALIDROUTEPOLICYERR, "route invalid policy");
1326         NVSWITCH_REPORT_DATA(_HW_NPORT_ROUTE_INVALIDROUTEPOLICYERR, data);
1327         nvswitch_clear_flags(&unhandled, bit);
1328     }
1329 
1330     bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _NVS_ECC_LIMIT_ERR, 1);
1331     if (nvswitch_test_flags(pending, bit))
1332     {
1333         // Ignore LIMIT error if DBE is pending
1334         if (!(nvswitch_test_flags(report.raw_pending,
1335                 DRF_NUM(_ROUTE, _ERR_STATUS_0, _NVS_ECC_DBE_ERR, 1))))
1336         {
1337             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER);
1338             _nvswitch_collect_error_info_ls10(device, link,
1339                 NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
1340                 &data);
1341             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_ROUTE_NVS_ECC_LIMIT_ERR, "route incoming ECC limit");
1342             NVSWITCH_REPORT_DATA(_HW_NPORT_ROUTE_NVS_ECC_LIMIT_ERR, data);
1343 
1344             _nvswitch_construct_ecc_error_event_ls10(&err_event,
1345                 NVSWITCH_ERR_HW_NPORT_ROUTE_NVS_ECC_LIMIT_ERR, link, NV_FALSE, 0,
1346                 NV_FALSE, 1);
1347 
1348             nvswitch_inforom_ecc_log_err_event(device, &err_event);
1349         }
1350 
1351         nvswitch_clear_flags(&unhandled, bit);
1352     }
1353 
1354     bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _GLT_ECC_LIMIT_ERR, 1);
1355     if (nvswitch_test_flags(pending, bit))
1356     {
1357         // Ignore LIMIT error if DBE is pending
1358         if (!(nvswitch_test_flags(report.raw_pending,
1359                 DRF_NUM(_ROUTE, _ERR_STATUS_0, _GLT_ECC_DBE_ERR, 1))))
1360         {
1361             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_GLT_ECC_ERROR_COUNTER);
1362             _nvswitch_collect_error_info_ls10(device, link,
1363                 NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
1364                 &data);
1365             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_ROUTE_NVS_ECC_LIMIT_ERR, "GLT ECC limit");
1366             NVSWITCH_REPORT_DATA(_HW_NPORT_ROUTE_GLT_ECC_LIMIT_ERR, data);
1367 
1368             _nvswitch_construct_ecc_error_event_ls10(&err_event,
1369                 NVSWITCH_ERR_HW_NPORT_ROUTE_GLT_ECC_LIMIT_ERR, link, NV_FALSE, 0,
1370                 NV_FALSE, 1);
1371 
1372             nvswitch_inforom_ecc_log_err_event(device, &err_event);
1373         }
1374 
1375         nvswitch_clear_flags(&unhandled, bit);
1376     }
1377 
1378     bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _MCRID_ECC_LIMIT_ERR, 1);
1379     if (nvswitch_test_flags(pending, bit))
1380     {
1381         // Ignore LIMIT error if DBE is pending
1382         if (!(nvswitch_test_flags(report.raw_pending,
1383                 DRF_NUM(_ROUTE, _ERR_STATUS_0, _MCRID_ECC_DBE_ERR, 1))))
1384         {
1385             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_MCRID_ECC_ERROR_COUNTER);
1386             _nvswitch_collect_error_info_ls10(device, link,
1387                 NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
1388                 &data);
1389             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_ROUTE_NVS_ECC_LIMIT_ERR, "MCRID ECC limit");
1390             NVSWITCH_REPORT_DATA(_HW_NPORT_ROUTE_MCRID_ECC_LIMIT_ERR, data);
1391 
1392             _nvswitch_construct_ecc_error_event_ls10(&err_event,
1393                 NVSWITCH_ERR_HW_NPORT_ROUTE_MCRID_ECC_LIMIT_ERR, link, NV_FALSE, 0,
1394                 NV_FALSE, 1);
1395 
1396             nvswitch_inforom_ecc_log_err_event(device, &err_event);
1397         }
1398 
1399         nvswitch_clear_flags(&unhandled, bit);
1400     }
1401 
1402     bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _EXTMCRID_ECC_LIMIT_ERR, 1);
1403     if (nvswitch_test_flags(pending, bit))
1404     {
1405         // Ignore LIMIT error if DBE is pending
1406         if (!(nvswitch_test_flags(report.raw_pending,
1407                 DRF_NUM(_ROUTE, _ERR_STATUS_0, _EXTMCRID_ECC_DBE_ERR, 1))))
1408         {
1409             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_EXTMCRID_ECC_ERROR_COUNTER);
1410             _nvswitch_collect_error_info_ls10(device, link,
1411                 NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
1412                 &data);
1413             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_ROUTE_NVS_ECC_LIMIT_ERR, "EXTMCRID ECC limit");
1414             NVSWITCH_REPORT_DATA(_HW_NPORT_ROUTE_EXTMCRID_ECC_LIMIT_ERR, data);
1415 
1416             _nvswitch_construct_ecc_error_event_ls10(&err_event,
1417                 NVSWITCH_ERR_HW_NPORT_ROUTE_EXTMCRID_ECC_LIMIT_ERR, link, NV_FALSE, 0,
1418                 NV_FALSE, 1);
1419 
1420             nvswitch_inforom_ecc_log_err_event(device, &err_event);
1421         }
1422 
1423         nvswitch_clear_flags(&unhandled, bit);
1424     }
1425 
1426     bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _RAM_ECC_LIMIT_ERR, 1);
1427     if (nvswitch_test_flags(pending, bit))
1428     {
1429         // Ignore LIMIT error if DBE is pending
1430         if (!(nvswitch_test_flags(report.raw_pending,
1431                 DRF_NUM(_ROUTE, _ERR_STATUS_0, _RAM_ECC_DBE_ERR, 1))))
1432         {
1433             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_RAM_ECC_ERROR_COUNTER);
1434             _nvswitch_collect_error_info_ls10(device, link,
1435                 NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
1436                 &data);
1437             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_ROUTE_RAM_ECC_LIMIT_ERR, "RAM ECC limit");
1438             NVSWITCH_REPORT_DATA(_HW_NPORT_ROUTE_RAM_ECC_LIMIT_ERR, data);
1439 
1440             _nvswitch_construct_ecc_error_event_ls10(&err_event,
1441                 NVSWITCH_ERR_HW_NPORT_ROUTE_RAM_ECC_LIMIT_ERR, link, NV_FALSE, 0,
1442                 NV_FALSE, 1);
1443 
1444             nvswitch_inforom_ecc_log_err_event(device, &err_event);
1445         }
1446 
1447         nvswitch_clear_flags(&unhandled, bit);
1448     }
1449 
1450     bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _INVALID_MCRID_ERR, 1);
1451     if (nvswitch_test_flags(pending, bit))
1452     {
1453         _nvswitch_collect_error_info_ls10(device, link,
1454             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
1455             &data);
1456         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_ROUTE_INVALID_MCRID_ERR, "invalid MC route");
1457         NVSWITCH_REPORT_DATA(_HW_NPORT_ROUTE_INVALID_MCRID_ERR, data);
1458         nvswitch_clear_flags(&unhandled, bit);
1459     }
1460 
1461     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
1462 
1463     // Disable interrupts that have occurred after fatal error.
1464     // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
1465     if (device->link[link].fatal_error_occurred)
1466     {
1467         NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0,
1468                 report.raw_enable ^ pending);
1469     }
1470 
1471     if (report.raw_first & report.mask)
1472     {
1473         NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_FIRST_0,
1474                 report.raw_first & report.mask);
1475     }
1476 
1477     NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_STATUS_0, pending);
1478 
1479     //
1480     // Note, when traffic is flowing, if we reset ERR_COUNT before ERR_STATUS
1481     // register, we won't see an interrupt again until counter wraps around.
1482     // In that case, we will miss writing back many ECC victim entries. Hence,
1483     // always clear _ERR_COUNT only after _ERR_STATUS register is cleared!
1484     //
1485     NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER, 0x0);
1486 
1487     if (unhandled != 0)
1488     {
1489         return -NVL_MORE_PROCESSING_REQUIRED;
1490     }
1491 
1492     return NVL_SUCCESS;
1493 }
1494 
1495 //
1496 // Ingress
1497 //
1498 
1499 static NvlStatus
1500 _nvswitch_service_ingress_fatal_ls10
1501 (
1502     nvswitch_device *device,
1503     NvU32            link
1504 )
1505 {
1506     ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
1507     NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
1508     NvU32 pending, bit, contain, unhandled;
1509     NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
1510     INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
1511 
1512     report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_STATUS_0);
1513     report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_FATAL_REPORT_EN_0);
1514     report.mask = report.raw_enable & chip_device->intr_mask.ingress[0].fatal;
1515     pending = report.raw_pending & report.mask;
1516 
1517     if (pending == 0)
1518     {
1519         return -NVL_NOT_FOUND;
1520     }
1521 
1522     unhandled = pending;
1523     report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_FIRST_0);
1524     contain = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_CONTAIN_EN_0);
1525     _nvswitch_collect_error_info_ls10(device, link,
1526         NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_TIME |
1527         NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_MISC |
1528         NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_HDR,
1529         &data);
1530 
1531     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _CMDDECODEERR, 1);
1532     if (nvswitch_test_flags(pending, bit))
1533     {
1534         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_CMDDECODEERR, "ingress invalid command", NV_FALSE);
1535         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_CMDDECODEERR, data);
1536         nvswitch_clear_flags(&unhandled, bit);
1537     }
1538 
1539     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTAREMAPTAB_ECC_DBE_ERR, 1);
1540     if (nvswitch_test_flags(pending, bit))
1541     {
1542         report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_EXTAREMAPTAB_ECC_ERROR_COUNTER);
1543         report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_EXTAREMAPTAB_ECC_ERROR_ADDRESS);
1544         report.data[2] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_EXTAREMAPTAB_ECC_ERROR_ADDRESS_VALID);
1545         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_EXTAREMAPTAB_ECC_DBE_ERR, "ingress ExtA remap DBE", NV_FALSE);
1546         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_EXTAREMAPTAB_ECC_DBE_ERR, data);
1547         nvswitch_clear_flags(&unhandled, bit);
1548 
1549         _nvswitch_construct_ecc_error_event_ls10(&err_event,
1550             NVSWITCH_ERR_HW_NPORT_INGRESS_EXTAREMAPTAB_ECC_DBE_ERR, link, NV_FALSE, 0,
1551             NV_TRUE, 1);
1552 
1553         nvswitch_inforom_ecc_log_err_event(device, &err_event);
1554     }
1555 
1556     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_HDR_ECC_DBE_ERR, 1);
1557     if (nvswitch_test_flags(pending, bit))
1558     {
1559         report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER);
1560         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_NCISOC_HDR_ECC_DBE_ERR, "ingress header DBE", NV_FALSE);
1561         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_NCISOC_HDR_ECC_DBE_ERR, data);
1562         nvswitch_clear_flags(&unhandled, bit);
1563 
1564         _nvswitch_construct_ecc_error_event_ls10(&err_event,
1565             NVSWITCH_ERR_HW_NPORT_INGRESS_NCISOC_HDR_ECC_DBE_ERR, link, NV_FALSE, 0,
1566             NV_TRUE, 1);
1567 
1568         nvswitch_inforom_ecc_log_err_event(device, &err_event);
1569 
1570         // Clear associated LIMIT_ERR interrupt
1571         if (report.raw_pending & DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_HDR_ECC_LIMIT_ERR, 1))
1572         {
1573             NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_STATUS_0,
1574                 DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_HDR_ECC_LIMIT_ERR, 1));
1575         }
1576     }
1577 
1578     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _INVALIDVCSET, 1);
1579     if (nvswitch_test_flags(pending, bit))
1580     {
1581         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_INVALIDVCSET, "ingress invalid VCSet", NV_FALSE);
1582         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_INVALIDVCSET, data);
1583         nvswitch_clear_flags(&unhandled, bit);
1584     }
1585 
1586     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _REMAPTAB_ECC_DBE_ERR, 1);
1587     if (nvswitch_test_flags(pending, bit))
1588     {
1589         NvBool bAddressValid = NV_FALSE;
1590         NvU32 address = 0;
1591         NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS,
1592                 _ERR_REMAPTAB_ECC_ERROR_ADDRESS);
1593 
1594         if (FLD_TEST_DRF(_INGRESS_ERR_REMAPTAB, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
1595                          addressValid))
1596         {
1597             address = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS,
1598                                                _ERR_REMAPTAB_ECC_ERROR_ADDRESS);
1599             bAddressValid = NV_TRUE;
1600         }
1601 
1602         report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_REMAPTAB_ECC_ERROR_COUNTER);
1603         report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_REMAPTAB_ECC_ERROR_ADDRESS);
1604         report.data[2] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_REMAPTAB_ECC_ERROR_ADDRESS_VALID);
1605         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_REMAPTAB_ECC_DBE_ERR, "ingress Remap DBE", NV_FALSE);
1606         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_REMAPTAB_ECC_DBE_ERR, data);
1607         nvswitch_clear_flags(&unhandled, bit);
1608 
1609         _nvswitch_construct_ecc_error_event_ls10(&err_event,
1610             NVSWITCH_ERR_HW_NPORT_INGRESS_REMAPTAB_ECC_DBE_ERR, link, bAddressValid,
1611             address, NV_TRUE, 1);
1612 
1613         nvswitch_inforom_ecc_log_err_event(device, &err_event);
1614     }
1615 
1616     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _RIDTAB_ECC_DBE_ERR, 1);
1617     if (nvswitch_test_flags(pending, bit))
1618     {
1619         NvBool bAddressValid = NV_FALSE;
1620         NvU32 address = 0;
1621         NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS,
1622                 _ERR_RIDTAB_ECC_ERROR_ADDRESS_VALID);
1623 
1624         if (FLD_TEST_DRF(_INGRESS_ERR_RIDTAB, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
1625                          addressValid))
1626         {
1627             address = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS,
1628                                                _ERR_RIDTAB_ECC_ERROR_ADDRESS);
1629             bAddressValid = NV_TRUE;
1630         }
1631 
1632         report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_RIDTAB_ECC_ERROR_COUNTER);
1633         report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_RIDTAB_ECC_ERROR_ADDRESS);
1634         report.data[2] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_RIDTAB_ECC_ERROR_ADDRESS_VALID);
1635         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_RIDTAB_ECC_DBE_ERR, "ingress RID DBE", NV_FALSE);
1636         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_RIDTAB_ECC_DBE_ERR, data);
1637         nvswitch_clear_flags(&unhandled, bit);
1638 
1639         _nvswitch_construct_ecc_error_event_ls10(&err_event,
1640             NVSWITCH_ERR_HW_NPORT_INGRESS_RIDTAB_ECC_DBE_ERR, link, bAddressValid,
1641             address, NV_TRUE, 1);
1642 
1643         nvswitch_inforom_ecc_log_err_event(device, &err_event);
1644     }
1645 
1646     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _RLANTAB_ECC_DBE_ERR, 1);
1647     if (nvswitch_test_flags(pending, bit))
1648     {
1649         NvBool bAddressValid = NV_FALSE;
1650         NvU32 address = 0;
1651         NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS,
1652                 _ERR_RLANTAB_ECC_ERROR_ADDRESS_VALID);
1653 
1654         if (FLD_TEST_DRF(_INGRESS_ERR_RLANTAB, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
1655                          addressValid))
1656         {
1657             address = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS,
1658                                                _ERR_RLANTAB_ECC_ERROR_ADDRESS);
1659             bAddressValid = NV_TRUE;
1660         }
1661 
1662         report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_RLANTAB_ECC_ERROR_COUNTER);
1663         report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_RLANTAB_ECC_ERROR_ADDRESS);
1664         report.data[2] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_RLANTAB_ECC_ERROR_ADDRESS_VALID);
1665         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_RLANTAB_ECC_DBE_ERR, "ingress RLAN DBE", NV_FALSE);
1666         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_RLANTAB_ECC_DBE_ERR, data);
1667         nvswitch_clear_flags(&unhandled, bit);
1668 
1669         _nvswitch_construct_ecc_error_event_ls10(&err_event,
1670             NVSWITCH_ERR_HW_NPORT_INGRESS_RLANTAB_ECC_DBE_ERR, link, bAddressValid,
1671             address, NV_TRUE, 1);
1672 
1673         nvswitch_inforom_ecc_log_err_event(device, &err_event);
1674     }
1675 
1676     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_PARITY_ERR, 1);
1677     if (nvswitch_test_flags(pending, bit))
1678     {
1679         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_NCISOC_PARITY_ERR, "ingress control parity", NV_FALSE);
1680         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_NCISOC_PARITY_ERR, data);
1681         nvswitch_clear_flags(&unhandled, bit);
1682 
1683         _nvswitch_construct_ecc_error_event_ls10(&err_event,
1684             NVSWITCH_ERR_HW_NPORT_INGRESS_NCISOC_PARITY_ERR, link, NV_FALSE, 0,
1685             NV_TRUE, 1);
1686 
1687         nvswitch_inforom_ecc_log_err_event(device, &err_event);
1688     }
1689 
1690     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTBREMAPTAB_ECC_DBE_ERR, 1);
1691     if (nvswitch_test_flags(pending, bit))
1692     {
1693         report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_EXTBREMAPTAB_ECC_ERROR_COUNTER);
1694         report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_EXTBREMAPTAB_ECC_ERROR_ADDRESS);
1695         report.data[2] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_EXTBREMAPTAB_ECC_ERROR_ADDRESS_VALID);
1696         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_EXTBREMAPTAB_ECC_DBE_ERR, "ingress ExtB remap DBE", NV_FALSE);
1697         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_EXTBREMAPTAB_ECC_DBE_ERR, data);
1698         nvswitch_clear_flags(&unhandled, bit);
1699 
1700         _nvswitch_construct_ecc_error_event_ls10(&err_event,
1701             NVSWITCH_ERR_HW_NPORT_INGRESS_EXTBREMAPTAB_ECC_DBE_ERR, link, NV_FALSE, 0,
1702             NV_TRUE, 1);
1703 
1704         nvswitch_inforom_ecc_log_err_event(device, &err_event);
1705     }
1706 
1707     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _MCREMAPTAB_ECC_DBE_ERR, 1);
1708     if (nvswitch_test_flags(pending, bit))
1709     {
1710         report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_MCREMAPTAB_ECC_ERROR_COUNTER);
1711         report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_MCREMAPTAB_ECC_ERROR_ADDRESS);
1712         report.data[2] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_MCREMAPTAB_ECC_ERROR_ADDRESS_VALID);
1713         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_MCREMAPTAB_ECC_DBE_ERR, "ingress MC remap DBE", NV_FALSE);
1714         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_MCREMAPTAB_ECC_DBE_ERR, data);
1715         nvswitch_clear_flags(&unhandled, bit);
1716 
1717         _nvswitch_construct_ecc_error_event_ls10(&err_event,
1718             NVSWITCH_ERR_HW_NPORT_INGRESS_MCREMAPTAB_ECC_DBE_ERR, link, NV_FALSE, 0,
1719             NV_TRUE, 1);
1720 
1721         nvswitch_inforom_ecc_log_err_event(device, &err_event);
1722     }
1723 
1724     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
1725 
1726     // Disable interrupts that have occurred after fatal error.
1727     // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
1728     if (device->link[link].fatal_error_occurred)
1729     {
1730         NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_FATAL_REPORT_EN_0,
1731                 report.raw_enable ^ pending);
1732     }
1733 
1734     if (report.raw_first & report.mask)
1735     {
1736         NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_FIRST_0,
1737             report.raw_first & report.mask);
1738     }
1739 
1740     NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_STATUS_0, pending);
1741 
1742     if (unhandled != 0)
1743     {
1744         return -NVL_MORE_PROCESSING_REQUIRED;
1745     }
1746 
1747     return NVL_SUCCESS;
1748 }
1749 
1750 static NvlStatus
1751 _nvswitch_service_ingress_nonfatal_ls10
1752 (
1753     nvswitch_device *device,
1754     NvU32            link
1755 )
1756 {
1757     ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
1758     NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
1759     NvU32 pending, bit, unhandled;
1760     NvU32 pending_0, pending_1;
1761     NvU32 raw_pending_0;
1762     NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
1763     INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
1764     NvlStatus status = NVL_SUCCESS;
1765 
1766     //
1767     // _ERR_STATUS_0
1768     //
1769     report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_STATUS_0);
1770     report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0);
1771     report.mask = report.raw_enable & chip_device->intr_mask.ingress[0].nonfatal;
1772 
1773     raw_pending_0 = report.raw_pending;
1774     pending = (report.raw_pending & report.mask);
1775     pending_0 = pending;
1776 
1777     if (pending == 0)
1778     {
1779         goto _nvswitch_service_ingress_nonfatal_ls10_err_status_1;
1780     }
1781 
1782     unhandled = pending;
1783     report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_FIRST_0);
1784     _nvswitch_collect_error_info_ls10(device, link,
1785         NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_TIME |
1786         NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_MISC |
1787         NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_HDR,
1788         &data);
1789 
1790     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _REQCONTEXTMISMATCHERR, 1);
1791     if (nvswitch_test_flags(pending, bit))
1792     {
1793         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_REQCONTEXTMISMATCHERR, "ingress request context mismatch");
1794         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_REQCONTEXTMISMATCHERR, data);
1795         nvswitch_clear_flags(&unhandled, bit);
1796     }
1797 
1798     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _ACLFAIL, 1);
1799     if (nvswitch_test_flags(pending, bit))
1800     {
1801         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_ACLFAIL, "ingress invalid ACL");
1802         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_ACLFAIL, data);
1803         nvswitch_clear_flags(&unhandled, bit);
1804     }
1805 
1806     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_HDR_ECC_LIMIT_ERR, 1);
1807     if (nvswitch_test_flags(pending, bit))
1808     {
1809         // Ignore LIMIT error if DBE is pending
1810         if (!(nvswitch_test_flags(report.raw_pending,
1811                 DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_HDR_ECC_DBE_ERR, 1))))
1812         {
1813             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER);
1814             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_NCISOC_HDR_ECC_LIMIT_ERR, "ingress header ECC");
1815             NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_NCISOC_HDR_ECC_LIMIT_ERR, data);
1816 
1817             _nvswitch_construct_ecc_error_event_ls10(&err_event,
1818                 NVSWITCH_ERR_HW_NPORT_INGRESS_NCISOC_HDR_ECC_LIMIT_ERR, link, NV_FALSE, 0,
1819                 NV_FALSE, 1);
1820 
1821             nvswitch_inforom_ecc_log_err_event(device, &err_event);
1822         }
1823 
1824         nvswitch_clear_flags(&unhandled, bit);
1825     }
1826 
1827     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _ADDRBOUNDSERR, 1);
1828     if (nvswitch_test_flags(pending, bit))
1829     {
1830         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_ADDRBOUNDSERR, "ingress address bounds");
1831         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_ADDRBOUNDSERR, data);
1832         nvswitch_clear_flags(&unhandled, bit);
1833     }
1834 
1835     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _RIDTABCFGERR, 1);
1836     if (nvswitch_test_flags(pending, bit))
1837     {
1838         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_RIDTABCFGERR, "ingress RID packet");
1839         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_RIDTABCFGERR, data);
1840         nvswitch_clear_flags(&unhandled, bit);
1841     }
1842 
1843     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _RLANTABCFGERR, 1);
1844     if (nvswitch_test_flags(pending, bit))
1845     {
1846         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_RLANTABCFGERR, "ingress RLAN packet");
1847         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_RLANTABCFGERR, data);
1848         nvswitch_clear_flags(&unhandled, bit);
1849     }
1850 
1851 
1852     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _REMAPTAB_ECC_LIMIT_ERR, 1);
1853     if (nvswitch_test_flags(pending, bit))
1854     {
1855         // Ignore LIMIT error if DBE is pending
1856         if (!(nvswitch_test_flags(report.raw_pending,
1857                 DRF_NUM(_INGRESS, _ERR_STATUS_0, _REMAPTAB_ECC_DBE_ERR, 1))))
1858         {
1859             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_REMAPTAB_ECC_ERROR_COUNTER);
1860             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_REMAPTAB_ECC_LIMIT_ERR, "ingress remap ECC");
1861             NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_REMAPTAB_ECC_LIMIT_ERR, data);
1862 
1863             _nvswitch_construct_ecc_error_event_ls10(&err_event,
1864                 NVSWITCH_ERR_HW_NPORT_INGRESS_REMAPTAB_ECC_LIMIT_ERR, link, NV_FALSE, 0,
1865                 NV_FALSE, 1);
1866 
1867             nvswitch_inforom_ecc_log_err_event(device, &err_event);
1868         }
1869 
1870         nvswitch_clear_flags(&unhandled, bit);
1871     }
1872 
1873     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _RIDTAB_ECC_LIMIT_ERR, 1);
1874     if (nvswitch_test_flags(pending, bit))
1875     {
1876         // Ignore LIMIT error if DBE is pending
1877         if (!(nvswitch_test_flags(report.raw_pending,
1878                 DRF_NUM(_INGRESS, _ERR_STATUS_0, _RIDTAB_ECC_DBE_ERR, 1))))
1879         {
1880             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_RIDTAB_ECC_ERROR_COUNTER);
1881             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_RIDTAB_ECC_LIMIT_ERR, "ingress RID ECC");
1882             NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_RIDTAB_ECC_LIMIT_ERR, data);
1883 
1884             _nvswitch_construct_ecc_error_event_ls10(&err_event,
1885                 NVSWITCH_ERR_HW_NPORT_INGRESS_RIDTAB_ECC_LIMIT_ERR, link, NV_FALSE, 0,
1886                 NV_FALSE, 1);
1887 
1888             nvswitch_inforom_ecc_log_err_event(device, &err_event);
1889         }
1890 
1891         nvswitch_clear_flags(&unhandled, bit);
1892     }
1893 
1894     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _RLANTAB_ECC_LIMIT_ERR, 1);
1895     if (nvswitch_test_flags(pending, bit))
1896     {
1897         // Ignore LIMIT error if DBE is pending
1898         if (!(nvswitch_test_flags(report.raw_pending,
1899                 DRF_NUM(_INGRESS, _ERR_STATUS_0, _RLANTAB_ECC_DBE_ERR, 1))))
1900         {
1901             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_RLANTAB_ECC_ERROR_COUNTER);
1902             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_RLANTAB_ECC_LIMIT_ERR, "ingress RLAN ECC");
1903             NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_RLANTAB_ECC_LIMIT_ERR, data);
1904 
1905             _nvswitch_construct_ecc_error_event_ls10(&err_event,
1906                 NVSWITCH_ERR_HW_NPORT_INGRESS_RLANTAB_ECC_LIMIT_ERR, link, NV_FALSE, 0,
1907                 NV_FALSE, 1);
1908 
1909             nvswitch_inforom_ecc_log_err_event(device, &err_event);
1910         }
1911 
1912         nvswitch_clear_flags(&unhandled, bit);
1913     }
1914 
1915 
1916     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _ADDRTYPEERR, 1);
1917     if (nvswitch_test_flags(pending, bit))
1918     {
1919         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_ADDRTYPEERR, "ingress illegal address");
1920         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_ADDRTYPEERR, data);
1921         nvswitch_clear_flags(&unhandled, bit);
1922     }
1923 
1924 
1925     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTAREMAPTAB_INDEX_ERR, 1);
1926     if (nvswitch_test_flags(pending, bit))
1927     {
1928         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTAREMAPTAB_INDEX_ERR, "ingress ExtA remap index");
1929         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTAREMAPTAB_INDEX_ERR, data);
1930         nvswitch_clear_flags(&unhandled, bit);
1931     }
1932 
1933     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTBREMAPTAB_INDEX_ERR, 1);
1934     if (nvswitch_test_flags(pending, bit))
1935     {
1936         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTBREMAPTAB_INDEX_ERR, "ingress ExtB remap index");
1937         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTBREMAPTAB_INDEX_ERR, data);
1938         nvswitch_clear_flags(&unhandled, bit);
1939     }
1940 
1941     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _MCREMAPTAB_INDEX_ERR, 1);
1942     if (nvswitch_test_flags(pending, bit))
1943     {
1944         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_MCREMAPTAB_INDEX_ERR, "ingress MC remap index");
1945         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_MCREMAPTAB_INDEX_ERR, data);
1946         nvswitch_clear_flags(&unhandled, bit);
1947     }
1948 
1949     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTAREMAPTAB_REQCONTEXTMISMATCHERR, 1);
1950     if (nvswitch_test_flags(pending, bit))
1951     {
1952         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTAREMAPTAB_REQCONTEXTMISMATCHERR, "ingress ExtA request context mismatch");
1953         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTAREMAPTAB_REQCONTEXTMISMATCHERR, data);
1954         nvswitch_clear_flags(&unhandled, bit);
1955     }
1956 
1957     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTBREMAPTAB_REQCONTEXTMISMATCHERR, 1);
1958     if (nvswitch_test_flags(pending, bit))
1959     {
1960         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTBREMAPTAB_REQCONTEXTMISMATCHERR, "ingress ExtB request context mismatch");
1961         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTBREMAPTAB_REQCONTEXTMISMATCHERR, data);
1962         nvswitch_clear_flags(&unhandled, bit);
1963     }
1964 
1965     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _MCREMAPTAB_REQCONTEXTMISMATCHERR, 1);
1966     if (nvswitch_test_flags(pending, bit))
1967     {
1968         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_MCREMAPTAB_REQCONTEXTMISMATCHERR, "ingress MC request context mismatch");
1969         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_MCREMAPTAB_REQCONTEXTMISMATCHERR, data);
1970         nvswitch_clear_flags(&unhandled, bit);
1971     }
1972 
1973     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTAREMAPTAB_ACLFAIL, 1);
1974     if (nvswitch_test_flags(pending, bit))
1975     {
1976         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTAREMAPTAB_ACLFAIL, "ingress invalid ExtA ACL");
1977         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTAREMAPTAB_ACLFAIL, data);
1978         nvswitch_clear_flags(&unhandled, bit);
1979     }
1980 
1981     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTBREMAPTAB_ACLFAIL, 1);
1982     if (nvswitch_test_flags(pending, bit))
1983     {
1984         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTBREMAPTAB_ACLFAIL, "ingress invalid ExtB ACL");
1985         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTBREMAPTAB_ACLFAIL, data);
1986         nvswitch_clear_flags(&unhandled, bit);
1987     }
1988 
1989     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _MCREMAPTAB_ACLFAIL, 1);
1990     if (nvswitch_test_flags(pending, bit))
1991     {
1992         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_MCREMAPTAB_ACLFAIL, "ingress invalid MC ACL");
1993         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_MCREMAPTAB_ACLFAIL, data);
1994         nvswitch_clear_flags(&unhandled, bit);
1995     }
1996 
1997     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTAREMAPTAB_ADDRBOUNDSERR, 1);
1998     if (nvswitch_test_flags(pending, bit))
1999     {
2000         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTAREMAPTAB_ADDRBOUNDSERR, "ingress ExtA address bounds");
2001         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTAREMAPTAB_ADDRBOUNDSERR, data);
2002         nvswitch_clear_flags(&unhandled, bit);
2003     }
2004 
2005     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTBREMAPTAB_ADDRBOUNDSERR, 1);
2006     if (nvswitch_test_flags(pending, bit))
2007     {
2008         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTBREMAPTAB_ADDRBOUNDSERR, "ingress ExtB address bounds");
2009         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTBREMAPTAB_ADDRBOUNDSERR, data);
2010         nvswitch_clear_flags(&unhandled, bit);
2011     }
2012 
2013     bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _MCREMAPTAB_ADDRBOUNDSERR, 1);
2014     if (nvswitch_test_flags(pending, bit))
2015     {
2016         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_MCREMAPTAB_ADDRBOUNDSERR, "ingress MC address bounds");
2017         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_MCREMAPTAB_ADDRBOUNDSERR, data);
2018         nvswitch_clear_flags(&unhandled, bit);
2019     }
2020 
2021     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
2022 
2023     // Disable interrupts that have occurred after fatal error.
2024     // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
2025     if (device->link[link].fatal_error_occurred)
2026     {
2027         NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0,
2028             report.raw_enable ^ pending);
2029     }
2030 
2031     if (report.raw_first & report.mask)
2032     {
2033         NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_FIRST_0,
2034             report.raw_first & report.mask);
2035     }
2036 
2037     if (unhandled != 0)
2038     {
2039         status = -NVL_MORE_PROCESSING_REQUIRED;
2040     }
2041 
2042 _nvswitch_service_ingress_nonfatal_ls10_err_status_1:
2043     //
2044     // _ERR_STATUS_1
2045     //
2046     report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_STATUS_1);
2047     report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_1);
2048     report.mask = report.raw_enable & chip_device->intr_mask.ingress[1].nonfatal;
2049 
2050     pending = (report.raw_pending & report.mask);
2051     pending_1 = pending;
2052 
2053     if ((pending_0 == 0) && (pending_1 == 0))
2054     {
2055         return -NVL_NOT_FOUND;
2056     }
2057 
2058     unhandled = pending;
2059     report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_FIRST_1);
2060 
2061     bit = DRF_NUM(_INGRESS, _ERR_STATUS_1, _EXTAREMAPTAB_ECC_LIMIT_ERR, 1);
2062     if (nvswitch_test_flags(pending, bit))
2063     {
2064         // Ignore LIMIT error if DBE is pending
2065         if (!(nvswitch_test_flags(raw_pending_0,
2066                 DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTAREMAPTAB_ECC_DBE_ERR, 1))))
2067         {
2068             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_EXTAREMAPTAB_ECC_ERROR_COUNTER);
2069             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTAREMAPTAB_ECC_LIMIT_ERR, "ingress ExtA remap ECC");
2070             NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTAREMAPTAB_ECC_LIMIT_ERR, data);
2071 
2072             _nvswitch_construct_ecc_error_event_ls10(&err_event,
2073                 NVSWITCH_ERR_HW_NPORT_INGRESS_EXTAREMAPTAB_ECC_LIMIT_ERR, link, NV_FALSE, 0,
2074                 NV_FALSE, 1);
2075 
2076             nvswitch_inforom_ecc_log_err_event(device, &err_event);
2077         }
2078 
2079         nvswitch_clear_flags(&unhandled, bit);
2080     }
2081 
2082     bit = DRF_NUM(_INGRESS, _ERR_STATUS_1, _EXTBREMAPTAB_ECC_LIMIT_ERR, 1);
2083     if (nvswitch_test_flags(pending, bit))
2084     {
2085         // Ignore LIMIT error if DBE is pending
2086         if (!(nvswitch_test_flags(raw_pending_0,
2087                 DRF_NUM(_INGRESS, _ERR_STATUS_0, _EXTBREMAPTAB_ECC_DBE_ERR, 1))))
2088         {
2089             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_EXTBREMAPTAB_ECC_ERROR_COUNTER);
2090             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTBREMAPTAB_ECC_LIMIT_ERR, "ingress ExtB remap ECC");
2091             NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTBREMAPTAB_ECC_LIMIT_ERR, data);
2092 
2093             _nvswitch_construct_ecc_error_event_ls10(&err_event,
2094                 NVSWITCH_ERR_HW_NPORT_INGRESS_EXTBREMAPTAB_ECC_LIMIT_ERR, link, NV_FALSE, 0,
2095                 NV_FALSE, 1);
2096 
2097             nvswitch_inforom_ecc_log_err_event(device, &err_event);
2098         }
2099 
2100         nvswitch_clear_flags(&unhandled, bit);
2101     }
2102 
2103     bit = DRF_NUM(_INGRESS, _ERR_STATUS_1, _MCREMAPTAB_ECC_LIMIT_ERR, 1);
2104     if (nvswitch_test_flags(pending, bit))
2105     {
2106         // Ignore LIMIT error if DBE is pending
2107         if (!(nvswitch_test_flags(raw_pending_0,
2108                 DRF_NUM(_INGRESS, _ERR_STATUS_0, _MCREMAPTAB_ECC_DBE_ERR, 1))))
2109         {
2110             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_MCREMAPTAB_ECC_ERROR_COUNTER);
2111             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_MCREMAPTAB_ECC_LIMIT_ERR, "ingress MC remap ECC");
2112             NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_MCREMAPTAB_ECC_LIMIT_ERR, data);
2113 
2114             _nvswitch_construct_ecc_error_event_ls10(&err_event,
2115                 NVSWITCH_ERR_HW_NPORT_INGRESS_MCREMAPTAB_ECC_LIMIT_ERR, link, NV_FALSE, 0,
2116                 NV_FALSE, 1);
2117 
2118             nvswitch_inforom_ecc_log_err_event(device, &err_event);
2119         }
2120 
2121         nvswitch_clear_flags(&unhandled, bit);
2122     }
2123 
2124     bit = DRF_NUM(_INGRESS, _ERR_STATUS_1, _MCCMDTOUCADDRERR, 1);
2125     if (nvswitch_test_flags(pending, bit))
2126     {
2127         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_MCCMDTOUCADDRERR, "ingress MC command to uc");
2128         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_MCCMDTOUCADDRERR, data);
2129         nvswitch_clear_flags(&unhandled, bit);
2130     }
2131 
2132     bit = DRF_NUM(_INGRESS, _ERR_STATUS_1, _READMCREFLECTMEMERR, 1);
2133     if (nvswitch_test_flags(pending, bit))
2134     {
2135         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_READMCREFLECTMEMERR, "ingress read reflective");
2136         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_READMCREFLECTMEMERR, data);
2137         nvswitch_clear_flags(&unhandled, bit);
2138     }
2139 
2140     bit = DRF_NUM(_INGRESS, _ERR_STATUS_1, _EXTAREMAPTAB_ADDRTYPEERR, 1);
2141     if (nvswitch_test_flags(pending, bit))
2142     {
2143         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTAREMAPTAB_ADDRTYPEERR, "ingress ExtA address type");
2144         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTAREMAPTAB_ADDRTYPEERR, data);
2145         nvswitch_clear_flags(&unhandled, bit);
2146     }
2147 
2148     bit = DRF_NUM(_INGRESS, _ERR_STATUS_1, _EXTBREMAPTAB_ADDRTYPEERR, 1);
2149     if (nvswitch_test_flags(pending, bit))
2150     {
2151         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTBREMAPTAB_ADDRTYPEERR, "ingress ExtB address type");
2152         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTBREMAPTAB_ADDRTYPEERR, data);
2153         nvswitch_clear_flags(&unhandled, bit);
2154     }
2155 
2156     bit = DRF_NUM(_INGRESS, _ERR_STATUS_1, _MCREMAPTAB_ADDRTYPEERR, 1);
2157     if (nvswitch_test_flags(pending, bit))
2158     {
2159         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_MCREMAPTAB_ADDRTYPEERR, "ingress MC address type");
2160         NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_MCREMAPTAB_ADDRTYPEERR, data);
2161         nvswitch_clear_flags(&unhandled, bit);
2162     }
2163 
2164     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
2165 
2166     // Disable interrupts that have occurred after fatal error.
2167     // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
2168     if (device->link[link].fatal_error_occurred)
2169     {
2170         NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_1,
2171             report.raw_enable ^ pending);
2172     }
2173 
2174     if (report.raw_first & report.mask)
2175     {
2176         NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_FIRST_1,
2177             report.raw_first & report.mask);
2178     }
2179 
2180     NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_STATUS_0, pending_0);
2181     NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_STATUS_1, pending_1);
2182 
2183     if (unhandled != 0)
2184     {
2185         status = -NVL_MORE_PROCESSING_REQUIRED;
2186     }
2187 
2188     return status;
2189 }
2190 
2191 //
2192 // Tstate
2193 //
2194 
//
// Service non-fatal TSTATE (tag-state tracking) interrupts on the NPORT
// instance selected by 'link'.
//
// Returns:
//   -NVL_NOT_FOUND                - no enabled non-fatal TSTATE interrupt pending
//   -NVL_MORE_PROCESSING_REQUIRED - a pending bit had no matching handler
//   NVL_SUCCESS                   - all pending bits serviced
//
static NvlStatus
_nvswitch_service_tstate_nonfatal_ls10
(
    nvswitch_device *device,
    NvU32            link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, unhandled;
    NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
    INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};

    // Snapshot raw interrupt status and restrict it to bits that are both
    // enabled for non-fatal reporting in HW and unmasked in the driver's
    // tstate non-fatal interrupt mask.
    report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable & chip_device->intr_mask.tstate.nonfatal;
    report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_MISC_LOG_0);
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    // Track which pending bits get serviced; any bit left set at the end is
    // an interrupt this handler does not know about.
    unhandled = pending;
    report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_FIRST_0);

    // Tag pool RAM single-bit ECC error count crossed the reporting limit.
    bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // Ignore LIMIT error if DBE is pending
        if(!(nvswitch_test_flags(report.raw_pending,
                DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_DBE_ERR, 1))))
        {
            NvBool bAddressValid = NV_FALSE;
            NvU32 address = 0;
            NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE,
                    _ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID);

            // Only read the ECC error address if HW flags it as valid.
            if (FLD_TEST_DRF(_TSTATE_ERR_TAGPOOL, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
                             addressValid))
            {
                address = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE,
                                                   _ERR_TAGPOOL_ECC_ERROR_ADDRESS);
                bAddressValid = NV_TRUE;
            }

            // Log the error count, then reset the HW counter to its initial
            // value so the limit can trip again on further SBEs.
            report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER);
            NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER,
                DRF_DEF(_TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
            NVSWITCH_REPORT_NONFATAL(_HW_NPORT_TSTATE_TAGPOOL_ECC_LIMIT_ERR, "TS tag store single-bit threshold");
            // Capture egress-side time/misc/header debug state for the log.
            _nvswitch_collect_error_info_ls10(device, link,
                NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_TIME |
                NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MISC |
                NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_HDR,
                &data);
            NVSWITCH_REPORT_DATA(_HW_NPORT_TSTATE_TAGPOOL_ECC_LIMIT_ERR, data);

            // Record the (correctable, uncorrected-count 1) event in InfoROM.
            _nvswitch_construct_ecc_error_event_ls10(&err_event,
                NVSWITCH_ERR_HW_NPORT_TSTATE_TAGPOOL_ECC_LIMIT_ERR, link,
                bAddressValid, address, NV_FALSE, 1);

            nvswitch_inforom_ecc_log_err_event(device, &err_event);
        }

        nvswitch_clear_flags(&unhandled, bit);
    }

    // Crumbstore RAM single-bit ECC error count crossed the reporting limit.
    bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // Ignore LIMIT error if DBE is pending
        if(!(nvswitch_test_flags(report.raw_pending,
                DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_DBE_ERR, 1))))
        {
            NvBool bAddressValid = NV_FALSE;
            NvU32 address = 0;
            NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE,
                    _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID);

            // Only read the ECC error address if HW flags it as valid.
            if (FLD_TEST_DRF(_TSTATE_ERR_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
                             addressValid))
            {
                address = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE,
                                                   _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS);
                bAddressValid = NV_TRUE;
            }

            // Log the error count, then reset the HW counter to its initial
            // value so the limit can trip again on further SBEs.
            report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER);
            NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER,
                DRF_DEF(_TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
            NVSWITCH_REPORT_NONFATAL(_HW_NPORT_TSTATE_CRUMBSTORE_ECC_LIMIT_ERR, "TS crumbstore single-bit threshold");
            // Capture ingress-side time/misc/header debug state for the log.
            _nvswitch_collect_error_info_ls10(device, link,
                NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_TIME |
                NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_MISC |
                NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_HDR,
                &data);
            NVSWITCH_REPORT_DATA(_HW_NPORT_TSTATE_CRUMBSTORE_ECC_LIMIT_ERR, data);

            // Record the (correctable, uncorrected-count 1) event in InfoROM.
            _nvswitch_construct_ecc_error_event_ls10(&err_event,
                NVSWITCH_ERR_HW_NPORT_TSTATE_CRUMBSTORE_ECC_LIMIT_ERR, link,
                bAddressValid, address, NV_FALSE, 1);

            nvswitch_inforom_ecc_log_err_event(device, &err_event);
        }

        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0,
            report.raw_enable ^ pending);
    }

    // Acknowledge the "first error" latch for the bits we just serviced.
    if (report.raw_first & report.mask)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_FIRST_0,
            report.raw_first & report.mask);
    }

    // Write back only the serviced bits to clear them.
    // NOTE(review): assumes _ERR_STATUS_0 is write-1-to-clear — consistent
    // with the other handlers in this file; confirm against the IP manuals.
    NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
2329 
//
// Service fatal TSTATE (tag-state tracking) interrupts on the NPORT
// instance selected by 'link'.
//
// Returns:
//   -NVL_NOT_FOUND                - no enabled fatal TSTATE interrupt pending
//   -NVL_MORE_PROCESSING_REQUIRED - a pending bit had no matching handler
//   NVL_SUCCESS                   - all pending bits serviced
//
static NvlStatus
_nvswitch_service_tstate_fatal_ls10
(
    nvswitch_device *device,
    NvU32            link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, contain, unhandled;
    NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
    INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};

    // Snapshot raw interrupt status and restrict it to bits that are both
    // enabled for fatal reporting in HW and unmasked in the driver's
    // tstate fatal interrupt mask.
    report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable & chip_device->intr_mask.tstate.fatal;
    report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_MISC_LOG_0);
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    // Track which pending bits get serviced; any bit left set at the end is
    // an interrupt this handler does not know about.
    unhandled = pending;
    report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_FIRST_0);
    // Containment-enable snapshot. NOTE(review): 'contain' is not referenced
    // directly below — presumably consumed inside the NVSWITCH_REPORT_CONTAIN
    // macro, which expects a local of this name; confirm before renaming.
    contain = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_CONTAIN_EN_0);

    // Tag pool buffer error (reported as "TS pointer crossover").
    bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOLBUFERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_TAGPOOLBUFERR, "TS pointer crossover", NV_FALSE);
        // Capture egress-side time/misc/header debug state for the log.
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_HDR,
            &data);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_TSTATE_TAGPOOLBUFERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    // Tag pool RAM double-bit (uncorrectable) ECC error.
    bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NvBool bAddressValid = NV_FALSE;
        NvU32 address = 0;
        NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE,
                _ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID);

        // Only read the ECC error address if HW flags it as valid.
        if (FLD_TEST_DRF(_TSTATE_ERR_TAGPOOL, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
                         addressValid))
        {
            address = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE,
                                               _ERR_TAGPOOL_ECC_ERROR_ADDRESS);
            bAddressValid = NV_TRUE;
        }

        // Log the error count, then reset the HW counter to its initial value.
        report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER);
        NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER,
            DRF_DEF(_TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_TAGPOOL_ECC_DBE_ERR, "TS tag store fatal ECC", NV_FALSE);
        // Capture egress-side time/misc/header debug state for the log.
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_HDR,
            &data);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_TSTATE_TAGPOOL_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        // Record the uncorrectable event (count 1) in InfoROM.
        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_TSTATE_TAGPOOL_ECC_DBE_ERR, link, bAddressValid,
            address, NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt
        if (report.raw_pending & DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_STATUS_0,
                DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1));
        }
    }

    // Crumbstore buffer error.
    bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTOREBUFERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_CRUMBSTOREBUFERR, "TS crumbstore", NV_FALSE);
        // Capture egress-side time/misc/header debug state for the log.
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_HDR,
            &data);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_TSTATE_CRUMBSTOREBUFERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    // Crumbstore RAM double-bit (uncorrectable) ECC error.
    bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NvBool bAddressValid = NV_FALSE;
        NvU32 address = 0;
        NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE,
                _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID);

        // Only read the ECC error address if HW flags it as valid.
        if (FLD_TEST_DRF(_TSTATE_ERR_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
                         addressValid))
        {
            address = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE,
                                               _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS);
            bAddressValid = NV_TRUE;
        }

        // Log the error count, then reset the HW counter to its initial value.
        report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER);
        NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER,
            DRF_DEF(_TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_CRUMBSTORE_ECC_DBE_ERR, "TS crumbstore fatal ECC", NV_FALSE);
        // Capture ingress-side time/misc/header debug state for the log.
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_HDR,
            &data);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_TSTATE_CRUMBSTORE_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        // Record the uncorrectable event (count 1) in InfoROM.
        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_TSTATE_CRUMBSTORE_ECC_DBE_ERR, link, bAddressValid,
            address, NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt
        if (report.raw_pending & DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_STATUS_0,
                DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1));
        }
    }

    // ATO (ACK timeout) error; dump _ERR_DEBUG only when ATO was the first
    // latched error, so the debug state corresponds to this event.
    bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _ATO_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        if (FLD_TEST_DRF_NUM(_TSTATE, _ERR_FIRST_0, _ATO_ERR, 1, report.raw_first))
        {
            report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_DEBUG);
        }
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_ATO_ERR, "TS ATO timeout", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    // CAM response error: response tag value out of range.
    bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _CAMRSP_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_CAMRSP_ERR, "Rsp Tag value out of range", NV_FALSE);
        // Capture ingress-side time/misc/header debug state for the log.
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_HDR,
            &data);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_TSTATE_CAMRSP_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
    }

    // Acknowledge the "first error" latch for the bits we just serviced.
    if (report.raw_first & report.mask)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_FIRST_0,
            report.raw_first & report.mask);
    }

    // Write back only the serviced bits to clear them.
    // NOTE(review): assumes _ERR_STATUS_0 is write-1-to-clear — consistent
    // with the other handlers in this file; confirm against the IP manuals.
    NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
2517 
2518 //
2519 // Egress
2520 //
2521 
2522 static NvlStatus
2523 _nvswitch_service_egress_nonfatal_ls10
2524 (
2525     nvswitch_device *device,
2526     NvU32            link
2527 )
2528 {
2529     ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
2530     NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
2531     NvU32 pending, bit, unhandled;
2532     NvU32 pending_0, pending_1;
2533     NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
2534     INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
2535     NvlStatus status = NVL_SUCCESS;
2536 
2537     report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_STATUS_0);
2538     report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0);
2539     report.mask = report.raw_enable & chip_device->intr_mask.egress[0].nonfatal;
2540     pending = report.raw_pending & report.mask;
2541     pending_0 = pending;
2542 
2543     if (pending == 0)
2544     {
2545         goto _nvswitch_service_egress_nonfatal_ls10_err_status_1;
2546     }
2547 
2548     unhandled = pending;
2549     report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_FIRST_0);
2550     _nvswitch_collect_error_info_ls10(device, link,
2551         NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_TIME |
2552         NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MISC |
2553         NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_HDR,
2554         &data);
2555 
2556     bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_LIMIT_ERR, 1);
2557     if (nvswitch_test_flags(pending, bit))
2558     {
2559         // Ignore LIMIT error if DBE is pending
2560         if (!(nvswitch_test_flags(report.raw_pending,
2561                 DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_DBE_ERR, 1))))
2562         {
2563             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER);
2564             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_NXBAR_HDR_ECC_LIMIT_ERR, "egress input ECC error limit");
2565             NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_NXBAR_HDR_ECC_LIMIT_ERR, data);
2566 
2567             _nvswitch_construct_ecc_error_event_ls10(&err_event,
2568                 NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_HDR_ECC_LIMIT_ERR, link, NV_FALSE, 0,
2569                 NV_FALSE, 1);
2570 
2571             nvswitch_inforom_ecc_log_err_event(device, &err_event);
2572         }
2573 
2574         nvswitch_clear_flags(&unhandled, bit);
2575     }
2576 
2577     bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _RAM_OUT_HDR_ECC_LIMIT_ERR, 1);
2578     if (nvswitch_test_flags(pending, bit))
2579     {
2580         // Ignore LIMIT error if DBE is pending
2581         if(!(nvswitch_test_flags(report.raw_pending,
2582                 DRF_NUM(_EGRESS, _ERR_STATUS_0, _RAM_OUT_HDR_ECC_DBE_ERR, 1))))
2583         {
2584             NvBool bAddressValid = NV_FALSE;
2585             NvU32 address = 0;
2586             NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS,
2587                     _ERR_RAM_OUT_ECC_ERROR_ADDRESS_VALID);
2588 
2589             if (FLD_TEST_DRF(_EGRESS_ERR_RAM_OUT, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
2590                              addressValid))
2591             {
2592                 address = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS,
2593                                                    _ERR_RAM_OUT_ECC_ERROR_ADDRESS);
2594                 bAddressValid = NV_TRUE;
2595             }
2596 
2597             report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER);
2598             report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_ADDRESS);
2599             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_LIMIT_ERR, "egress output ECC error limit");
2600             NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_LIMIT_ERR, data);
2601 
2602             _nvswitch_construct_ecc_error_event_ls10(&err_event,
2603                 NVSWITCH_ERR_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_LIMIT_ERR, link, bAddressValid, address,
2604                 NV_FALSE, 1);
2605 
2606             nvswitch_inforom_ecc_log_err_event(device, &err_event);
2607         }
2608 
2609         nvswitch_clear_flags(&unhandled, bit);
2610     }
2611 
2612     bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _PRIVRSPERR, 1);
2613     if (nvswitch_test_flags(pending, bit))
2614     {
2615         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_PRIVRSPERR, "egress non-posted PRIV error");
2616         NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_PRIVRSPERR, data);
2617         nvswitch_clear_flags(&unhandled, bit);
2618     }
2619 
2620     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
2621 
2622     // Disable interrupts that have occurred after fatal error.
2623     // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
2624     if (device->link[link].fatal_error_occurred)
2625     {
2626         NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0,
2627             report.raw_enable ^ pending);
2628     }
2629 
2630     if (report.raw_first & report.mask)
2631     {
2632         NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_FIRST_0,
2633             report.raw_first & report.mask);
2634     }
2635 
2636     NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_0, pending);
2637 
2638     // HACK: Clear all pending interrupts!
2639     NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_0, 0xFFFFFFFF);
2640 
2641     if (unhandled != 0)
2642     {
2643         status = -NVL_MORE_PROCESSING_REQUIRED;
2644     }
2645 
2646 _nvswitch_service_egress_nonfatal_ls10_err_status_1:
2647     report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1);
2648     report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_1);
2649     report.mask = report.raw_enable & chip_device->intr_mask.egress[1].nonfatal;
2650     pending = report.raw_pending & report.mask;
2651     pending_1 = pending;
2652 
2653     if ((pending_0 == 0) && (pending_1 == 0))
2654     {
2655         return -NVL_NOT_FOUND;
2656     }
2657 
2658     unhandled = pending;
2659     report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_FIRST_1);
2660 
2661     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _NXBAR_REDUCTION_HDR_ECC_LIMIT_ERR, 1);
2662     if (nvswitch_test_flags(pending, bit))
2663     {
2664         // Ignore LIMIT error if DBE is pending
2665         if (!(nvswitch_test_flags(report.raw_pending,
2666                 DRF_NUM(_EGRESS, _ERR_STATUS_1, _NXBAR_REDUCTION_HDR_ECC_DBE_ERR, 1))))
2667         {
2668             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_NXBAR_REDUCTION_HDR_ECC_LIMIT_ERR, "egress reduction header ECC error limit");
2669             NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_NXBAR_HDR_ECC_LIMIT_ERR, data);
2670 
2671             _nvswitch_construct_ecc_error_event_ls10(&err_event,
2672                 NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_REDUCTION_HDR_ECC_LIMIT_ERR, link, NV_FALSE, 0,
2673                 NV_FALSE, 1);
2674 
2675             nvswitch_inforom_ecc_log_err_event(device, &err_event);
2676         }
2677 
2678         nvswitch_clear_flags(&unhandled, bit);
2679     }
2680 
2681     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCRSPCTRLSTORE_ECC_LIMIT_ERR, 1);
2682     if (nvswitch_test_flags(pending, bit))
2683     {
2684         // Ignore LIMIT error if DBE is pending
2685         if (!(nvswitch_test_flags(report.raw_pending,
2686                 DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCRSPCTRLSTORE_ECC_DBE_ERR, 1))))
2687         {
2688             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_MCRSPCTRLSTORE_ECC_LIMIT_ERR, "egress MC response ECC error limit");
2689             NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_MCRSPCTRLSTORE_ECC_LIMIT_ERR, data);
2690 
2691             _nvswitch_construct_ecc_error_event_ls10(&err_event,
2692                 NVSWITCH_ERR_HW_NPORT_EGRESS_MCRSPCTRLSTORE_ECC_LIMIT_ERR, link, NV_FALSE, 0,
2693                 NV_FALSE, 1);
2694 
2695             nvswitch_inforom_ecc_log_err_event(device, &err_event);
2696         }
2697 
2698         nvswitch_clear_flags(&unhandled, bit);
2699     }
2700 
2701     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _RBCTRLSTORE_ECC_LIMIT_ERR, 1);
2702     if (nvswitch_test_flags(pending, bit))
2703     {
2704         // Ignore LIMIT error if DBE is pending
2705         if (!(nvswitch_test_flags(report.raw_pending,
2706                 DRF_NUM(_EGRESS, _ERR_STATUS_1, _RBCTRLSTORE_ECC_DBE_ERR, 1))))
2707         {
2708             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_RBCTRLSTORE_ECC_LIMIT_ERR, "egress RB ECC error limit");
2709             NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_RBCTRLSTORE_ECC_LIMIT_ERR, data);
2710 
2711             _nvswitch_construct_ecc_error_event_ls10(&err_event,
2712                 NVSWITCH_ERR_HW_NPORT_EGRESS_RBCTRLSTORE_ECC_LIMIT_ERR, link, NV_FALSE, 0,
2713                 NV_FALSE, 1);
2714 
2715             nvswitch_inforom_ecc_log_err_event(device, &err_event);
2716         }
2717 
2718         nvswitch_clear_flags(&unhandled, bit);
2719     }
2720 
2721     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCREDSGT_ECC_LIMIT_ERR, 1);
2722     if (nvswitch_test_flags(pending, bit))
2723     {
2724         // Ignore LIMIT error if DBE is pending
2725         if (!(nvswitch_test_flags(report.raw_pending,
2726                 DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCREDSGT_ECC_DBE_ERR, 1))))
2727         {
2728             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_MCREDSGT_ECC_LIMIT_ERR, "egress RSG ECC error limit");
2729             NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_MCREDSGT_ECC_LIMIT_ERR, data);
2730 
2731             _nvswitch_construct_ecc_error_event_ls10(&err_event,
2732                 NVSWITCH_ERR_HW_NPORT_EGRESS_MCREDSGT_ECC_LIMIT_ERR, link, NV_FALSE, 0,
2733                 NV_FALSE, 1);
2734 
2735             nvswitch_inforom_ecc_log_err_event(device, &err_event);
2736         }
2737 
2738         nvswitch_clear_flags(&unhandled, bit);
2739     }
2740 
2741     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCREDBUF_ECC_LIMIT_ERR, 1);
2742     if (nvswitch_test_flags(pending, bit))
2743     {
2744         // Ignore LIMIT error if DBE is pending
2745         if (!(nvswitch_test_flags(report.raw_pending,
2746                 DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCREDBUF_ECC_DBE_ERR, 1))))
2747         {
2748             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_MCREDBUF_ECC_LIMIT_ERR, "egress MCRB ECC error limit");
2749             NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_MCREDBUF_ECC_LIMIT_ERR, data);
2750 
2751             _nvswitch_construct_ecc_error_event_ls10(&err_event,
2752                 NVSWITCH_ERR_HW_NPORT_EGRESS_MCREDBUF_ECC_LIMIT_ERR, link, NV_FALSE, 0,
2753                 NV_FALSE, 1);
2754 
2755             nvswitch_inforom_ecc_log_err_event(device, &err_event);
2756         }
2757 
2758         nvswitch_clear_flags(&unhandled, bit);
2759     }
2760 
2761     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCRSP_RAM_HDR_ECC_LIMIT_ERR, 1);
2762     if (nvswitch_test_flags(pending, bit))
2763     {
2764         // Ignore LIMIT error if DBE is pending
2765         if (!(nvswitch_test_flags(report.raw_pending,
2766                 DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCRSP_RAM_HDR_ECC_DBE_ERR, 1))))
2767         {
2768             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_MCRSP_RAM_HDR_ECC_LIMIT_ERR, "egress MC header ECC error limit");
2769             NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_MCRSP_RAM_HDR_ECC_LIMIT_ERR, data);
2770 
2771             _nvswitch_construct_ecc_error_event_ls10(&err_event,
2772                 NVSWITCH_ERR_HW_NPORT_EGRESS_MCRSP_RAM_HDR_ECC_LIMIT_ERR, link, NV_FALSE, 0,
2773                 NV_FALSE, 1);
2774 
2775             nvswitch_inforom_ecc_log_err_event(device, &err_event);
2776         }
2777 
2778         nvswitch_clear_flags(&unhandled, bit);
2779     }
2780 
2781     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _NXBAR_REDUCTION_HDR_ECC_DBE_ERR, 1);
2782     if (nvswitch_test_flags(pending, bit))
2783     {
2784         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_NXBAR_REDUCTION_HDR_ECC_DBE_ERR, "egress reduction header ECC DBE error");
2785         NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_NXBAR_REDUCTION_HDR_ECC_DBE_ERR, data);
2786         nvswitch_clear_flags(&unhandled, bit);
2787 
2788         _nvswitch_construct_ecc_error_event_ls10(&err_event,
2789             NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_REDUCTION_HDR_ECC_DBE_ERR, link, NV_FALSE, 0,
2790             NV_TRUE, 1);
2791 
2792         nvswitch_inforom_ecc_log_err_event(device, &err_event);
2793 
2794         // Clear associated LIMIT_ERR interrupt
2795         if (report.raw_pending & DRF_NUM(_EGRESS, _ERR_STATUS_1, _NXBAR_REDUCTION_HDR_ECC_LIMIT_ERR, 1))
2796         {
2797             NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1,
2798                 DRF_NUM(_EGRESS, _ERR_STATUS_1, _NXBAR_REDUCTION_HDR_ECC_LIMIT_ERR, 1));
2799         }
2800     }
2801 
2802     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _NXBAR_REDUCTION_HDR_PARITY_ERR, 1);
2803     if (nvswitch_test_flags(pending, bit))
2804     {
2805         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_NXBAR_REDUCTION_HDR_PARITY_ERR, "egress reduction header parity error");
2806         NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_NXBAR_REDUCTION_HDR_PARITY_ERR, data);
2807         nvswitch_clear_flags(&unhandled, bit);
2808     }
2809 
2810     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _NXBAR_REDUCTION_FLITTYPE_MISMATCH_ERR, 1);
2811     if (nvswitch_test_flags(pending, bit))
2812     {
2813         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_NXBAR_REDUCTION_FLITTYPE_MISMATCH_ERR, "egress reduction flit mismatch error");
2814         NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_NXBAR_REDUCTION_FLITTYPE_MISMATCH_ERR, data);
2815         nvswitch_clear_flags(&unhandled, bit);
2816     }
2817 
2818     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCREDBUF_ECC_DBE_ERR, 1);
2819     if (nvswitch_test_flags(pending, bit))
2820     {
2821         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_MCREDBUF_ECC_DBE_ERR, "egress reduction buffer ECC DBE error");
2822         NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_MCREDBUF_ECC_DBE_ERR, data);
2823         nvswitch_clear_flags(&unhandled, bit);
2824 
2825         _nvswitch_construct_ecc_error_event_ls10(&err_event,
2826             NVSWITCH_ERR_HW_NPORT_EGRESS_MCREDBUF_ECC_DBE_ERR, link, NV_FALSE, 0,
2827             NV_TRUE, 1);
2828 
2829         nvswitch_inforom_ecc_log_err_event(device, &err_event);
2830 
2831         // Clear associated LIMIT_ERR interrupt
2832         if (report.raw_pending & DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCREDBUF_ECC_LIMIT_ERR, 1))
2833         {
2834             NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1,
2835                 DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCREDBUF_ECC_LIMIT_ERR, 1));
2836         }
2837     }
2838 
2839     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCRSP_CNT_ERR, 1);
2840     if (nvswitch_test_flags(pending, bit))
2841     {
2842         _nvswitch_collect_error_info_ls10(device, link,
2843             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_TIME |
2844             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_MISC |
2845             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_HDR,
2846             &data);
2847         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_MCRSP_CNT_ERR, "egress MC response count error");
2848         NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_MCRSP_CNT_ERR, data);
2849         nvswitch_clear_flags(&unhandled, bit);
2850     }
2851 
2852     bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _RBRSP_CNT_ERR, 1);
2853     if (nvswitch_test_flags(pending, bit))
2854     {
2855         _nvswitch_collect_error_info_ls10(device, link,
2856             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_TIME |
2857             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_MISC |
2858             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MC_HDR,
2859             &data);
2860         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_RBRSP_CNT_ERR, "egress reduction response count error");
2861         NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_RBRSP_CNT_ERR, data);
2862         nvswitch_clear_flags(&unhandled, bit);
2863     }
2864 
2865     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
2866 
2867     // Disable interrupts that have occurred after fatal error.
2868     // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
2869     if (device->link[link].fatal_error_occurred)
2870     {
2871         NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_1,
2872             report.raw_enable ^ pending);
2873     }
2874 
2875     if (report.raw_first & report.mask)
2876     {
2877         NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_FIRST_1,
2878             report.raw_first & report.mask);
2879     }
2880 
2881     NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1, pending);
2882 
2883     // Clear all pending interrupts!
2884     NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1, 0xFFFFFFFF);
2885 
2886     if (unhandled != 0)
2887     {
2888         status = -NVL_MORE_PROCESSING_REQUIRED;
2889     }
2890 
2891     return status;
2892 }
2893 
//
// Service fatal EGRESS interrupts for the given NPORT link.
//
// Walks both fatal status banks in order: _EGRESS _ERR_STATUS_0, then
// _ERR_STATUS_1. For each pending enabled bit it reports the error (via the
// NVSWITCH_REPORT_CONTAIN* macros, which implicitly reference the local
// 'report', 'contain', 'data' and 'bit' variables), logs ECC events to the
// InfoROM where applicable, and finally acknowledges the serviced bits by
// writing them back to the (write-1-to-clear) status registers.
//
// Returns:
//   NVL_SUCCESS                     - all pending interrupts were handled
//   -NVL_NOT_FOUND                  - no enabled fatal interrupt was pending
//                                     in either bank
//   -NVL_MORE_PROCESSING_REQUIRED   - at least one pending bit was not
//                                     recognized by this handler
//
static NvlStatus
_nvswitch_service_egress_fatal_ls10
(
    nvswitch_device *device,
    NvU32            link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, contain, unhandled;
    NvU32 pending_0, pending_1;
    NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
    NVSWITCH_RAW_ERROR_LOG_TYPE credit_data = {0, { 0 }};
    NVSWITCH_RAW_ERROR_LOG_TYPE buffer_data = {0, { 0 }};
    INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
    NvlStatus status = NVL_SUCCESS;

    // ---- Bank 0: _ERR_STATUS_0 ----
    report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable & chip_device->intr_mask.egress[0].fatal;
    pending = report.raw_pending & report.mask;
    // Remember bank-0 pending state; the "nothing pending anywhere" decision
    // is deferred until bank 1 has also been sampled (see pending_1 below).
    pending_0 = pending;

    if (pending == 0)
    {
        // Nothing in bank 0; still check bank 1 before declaring NOT_FOUND.
        goto _nvswitch_service_egress_fatal_ls10_err_status_1;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_FIRST_0);
    // NOTE(review): 'contain' is not referenced explicitly below; it is
    // presumably consumed by the NVSWITCH_REPORT_CONTAIN* macros — confirm
    // against the macro definitions.
    contain = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_CONTAIN_EN_0);
    _nvswitch_collect_error_info_ls10(device, link,
        NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_TIME |
        NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MISC |
        NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_HDR,
        &data);

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _EGRESSBUFERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_EGRESSBUFERR, "egress crossbar overflow", NV_TRUE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_EGRESSBUFERR, data);

        // Capture the egress buffer pointer state for debug of the overflow.
        buffer_data.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _BUFFER_POINTERS0);
        buffer_data.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _BUFFER_POINTERS1);
        buffer_data.data[2] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _BUFFER_POINTERS2);
        buffer_data.data[3] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _BUFFER_POINTERS3);
        buffer_data.data[4] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _BUFFER_POINTERS4);
        buffer_data.data[5] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _BUFFER_POINTERS5);
        buffer_data.data[6] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _BUFFER_POINTERS6);
        buffer_data.data[7] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _BUFFER_POINTERS7);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_EGRESSBUFERR, buffer_data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _PKTROUTEERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_PKTROUTEERR, "egress packet route", NV_TRUE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_PKTROUTEERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _SEQIDERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_SEQIDERR, "egress sequence ID error", NV_TRUE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_SEQIDERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_NXBAR_HDR_ECC_DBE_ERR, "egress input ECC DBE error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NXBAR_HDR_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_HDR_ECC_DBE_ERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt: a DBE supersedes the
        // single-bit limit notification for the same RAM.
        if (report.raw_pending & DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_0,
                DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_LIMIT_ERR, 1));
        }
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _RAM_OUT_HDR_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NvBool bAddressValid = NV_FALSE;
        NvU32 address = 0;
        NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS,
                _ERR_RAM_OUT_ECC_ERROR_ADDRESS_VALID);

        // Only attach the failing RAM address to the ECC event if HW says
        // the captured address is valid.
        if (FLD_TEST_DRF(_EGRESS_ERR_RAM_OUT, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
                         addressValid))
        {
            address = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS,
                                               _ERR_RAM_OUT_ECC_ERROR_ADDRESS);
            bAddressValid = NV_TRUE;
        }

        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_DBE_ERR, "egress output ECC DBE error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_DBE_ERR, link, bAddressValid,
            address, NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt
        if (report.raw_pending & DRF_NUM(_EGRESS, _ERR_STATUS_0, _RAM_OUT_HDR_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_0,
                DRF_NUM(_EGRESS, _ERR_STATUS_0, _RAM_OUT_HDR_ECC_LIMIT_ERR, 1));
        }
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NCISOCCREDITOVFL, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_NCISOCCREDITOVFL, "egress credit overflow", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NCISOCCREDITOVFL, data);

        // Snapshot all eight NCISOC credit registers for debug.
        credit_data.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT0);
        credit_data.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT1);
        credit_data.data[2] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT2);
        credit_data.data[3] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT3);
        credit_data.data[4] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT4);
        credit_data.data[5] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT5);
        credit_data.data[6] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT6);
        credit_data.data[7] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT7);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NCISOCCREDITOVFL, credit_data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _REQTGTIDMISMATCHERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_REQTGTIDMISMATCHERR, "egress destination request ID error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_REQTGTIDMISMATCHERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _RSPREQIDMISMATCHERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_RSPREQIDMISMATCHERR, "egress destination response ID error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_RSPREQIDMISMATCHERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _URRSPERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_DROPNPURRSPERR, "egress non-posted UR error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_DROPNPURRSPERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _HWRSPERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_HWRSPERR, "egress non-posted HW error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_HWRSPERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_PARITY_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_NXBAR_HDR_PARITY_ERR, "egress control parity error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NXBAR_HDR_PARITY_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        // Parity errors are logged to InfoROM as uncorrected (DBE-class) events.
        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_HDR_PARITY_ERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NCISOC_CREDIT_PARITY_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_NCISOC_CREDIT_PARITY_ERR, "egress credit parity error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NCISOC_CREDIT_PARITY_ERR, data);

        // Snapshot all eight NCISOC credit registers for debug.
        credit_data.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT0);
        credit_data.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT1);
        credit_data.data[2] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT2);
        credit_data.data[3] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT3);
        credit_data.data[4] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT4);
        credit_data.data[5] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT5);
        credit_data.data[6] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT6);
        credit_data.data[7] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _NCISOC_CREDIT7);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NCISOC_CREDIT_PARITY_ERR, credit_data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_EGRESS_NCISOC_CREDIT_PARITY_ERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_FLITTYPE_MISMATCH_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_NXBAR_FLITTYPE_MISMATCH_ERR, "egress flit type mismatch", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NXBAR_FLITTYPE_MISMATCH_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _CREDIT_TIME_OUT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_CREDIT_TIME_OUT_ERR, "egress credit timeout", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_CREDIT_TIME_OUT_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_SIDEBAND_PD_PARITY_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_NXBAR_SIDEBAND_PD_PARITY_ERR, "egress crossbar SB parity", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NXBAR_SIDEBAND_PD_PARITY_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _INVALIDVCSET_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_INVALIDVCSET_ERR, "egress invalid VC set", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_INVALIDVCSET_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
    // Since pending is a subset of raw_enable, XOR clears exactly the
    // enable bits of the interrupts just serviced.
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
    }

    // Acknowledge the _ERR_FIRST_0 capture so the next error can latch.
    if (report.raw_first & report.mask)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_FIRST_0,
            report.raw_first & report.mask);
    }

    // Write-1-to-clear the serviced bank-0 status bits.
    NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        status = -NVL_MORE_PROCESSING_REQUIRED;
    }

    // ---- Bank 1: _ERR_STATUS_1 ----
_nvswitch_service_egress_fatal_ls10_err_status_1:
    report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1);
    report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_FATAL_REPORT_EN_1);
    report.mask = report.raw_enable & chip_device->intr_mask.egress[1].fatal;
    pending = report.raw_pending & report.mask;
    pending_1 = pending;

    // NOT_FOUND only when neither bank had any enabled pending interrupt.
    if ((pending_0 == 0) && (pending_1 == 0))
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_FIRST_1);
    contain = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_CONTAIN_EN_1);

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCRSPCTRLSTORE_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_MCRSPCTRLSTORE_ECC_DBE_ERR, "egress MC response ECC DBE error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_MCRSPCTRLSTORE_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_EGRESS_MCRSPCTRLSTORE_ECC_DBE_ERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt
        if (report.raw_pending & DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCRSPCTRLSTORE_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1,
                DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCRSPCTRLSTORE_ECC_LIMIT_ERR, 1));
        }
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _RBCTRLSTORE_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_RBCTRLSTORE_ECC_DBE_ERR, "egress reduction ECC DBE error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_RBCTRLSTORE_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_EGRESS_RBCTRLSTORE_ECC_DBE_ERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt
        if (report.raw_pending & DRF_NUM(_EGRESS, _ERR_STATUS_1, _RBCTRLSTORE_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1,
                DRF_NUM(_EGRESS, _ERR_STATUS_1, _RBCTRLSTORE_ECC_LIMIT_ERR, 1));
        }
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCREDSGT_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_MCREDSGT_ECC_DBE_ERR, "egress MC SG ECC DBE error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_MCREDSGT_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_EGRESS_MCREDSGT_ECC_DBE_ERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt
        if (report.raw_pending & DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCREDSGT_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1,
                DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCREDSGT_ECC_LIMIT_ERR, 1));
        }
    }

    bit = DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCRSP_RAM_HDR_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_MCRSP_RAM_HDR_ECC_DBE_ERR, "egress MC ram ECC DBE error", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_MCRSP_RAM_HDR_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_EGRESS_MCRSP_RAM_HDR_ECC_DBE_ERR, link, NV_FALSE, 0,
            NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt
        if (report.raw_pending & DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCRSP_RAM_HDR_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1,
                DRF_NUM(_EGRESS, _ERR_STATUS_1, _MCRSP_RAM_HDR_ECC_LIMIT_ERR, 1));
        }
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_FATAL_REPORT_EN_1,
            report.raw_enable ^ pending);
    }

    if (report.raw_first & report.mask)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_FIRST_1,
            report.raw_first & report.mask);
    }

    NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1, pending);

    // Clear all pending interrupts!
    // NOTE(review): this second write clears every _ERR_STATUS_1 bit,
    // including ones not serviced above — appears intentional (matches the
    // nonfatal handler's behavior); confirm against HW WAR documentation.
    NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_STATUS_1, 0xFFFFFFFF);

    if (unhandled != 0)
    {
        status = -NVL_MORE_PROCESSING_REQUIRED;
    }

    return status;
}
3291 
//
// Service non-fatal SOURCETRACK interrupts for the given NPORT link.
//
// Samples _SOURCETRACK _ERR_STATUS_0 masked by the non-fatal report-enable
// register and the SW interrupt mask. The only bit handled here is the CREQ
// TCEN0 crumbstore ECC single-bit limit error; it is reported (via
// NVSWITCH_REPORT_NONFATAL, which implicitly references the local 'report'
// variable) and logged to the InfoROM, then acknowledged by a
// write-1-to-clear of the status register.
//
// Returns:
//   NVL_SUCCESS                     - pending interrupts serviced
//   -NVL_NOT_FOUND                  - no enabled non-fatal interrupt pending
//   -NVL_MORE_PROCESSING_REQUIRED   - an unrecognized bit was pending
//
static NvlStatus
_nvswitch_service_sourcetrack_nonfatal_ls10
(
    nvswitch_device *device,
    NvU32           link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, unhandled;
    INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};

    report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link,
                            _SOURCETRACK, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link,
                            _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable & chip_device->intr_mask.sourcetrack.nonfatal;

    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK, _ERR_FIRST_0);

    bit = DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // Ignore LIMIT error if DBE is pending: the double-bit error handler
        // supersedes the single-bit limit notification for the same RAM.
        if (!(nvswitch_test_flags(report.raw_pending,
                DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, 1))))
        {
            NvBool bAddressValid = NV_FALSE;
            NvU32 address = 0;
            NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK,
                    _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID);

            // Only attach the failing address to the ECC event if HW says
            // the captured address is valid.
            if (FLD_TEST_DRF(_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID,
                             _VALID, _VALID, addressValid))
            {
                address = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK,
                                                   _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS);
                bAddressValid = NV_TRUE;
            }

            // Capture counter/address/valid registers in the report payload.
            report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK,
                                _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER);
            report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK,
                                _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS);
            report.data[2] = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK,
                                _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID);
            NVSWITCH_REPORT_NONFATAL(_HW_NPORT_SOURCETRACK_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR,
                                    "sourcetrack TCEN0 crumbstore ECC limit err");

            _nvswitch_construct_ecc_error_event_ls10(&err_event,
                NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, link,
                bAddressValid, address, NV_FALSE, 1);

            nvswitch_inforom_ecc_log_err_event(device, &err_event);
        }

        // The bit is considered handled even when suppressed by a pending DBE.
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    //
    // Disable interrupts that have occurred after fatal error.
    // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
    // (pending is a subset of raw_enable, so XOR clears exactly the
    // enable bits of the interrupts just serviced.)
    //
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
    }

    // Acknowledge the _ERR_FIRST_0 capture so the next error can latch.
    if (report.raw_first & report.mask)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _SOURCETRACK, _ERR_FIRST_0,
                report.raw_first & report.mask);
    }

    // Write-1-to-clear the serviced status bits.
    NVSWITCH_ENG_WR32(device, NPORT, , link, _SOURCETRACK, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
3386 
//
// Service pending fatal interrupts from the NPORT SOURCETRACK unit on the
// given link.  Each recognized error is reported via the CONTAIN path; the
// crumbstore double-bit ECC error is additionally logged to the InfoROM,
// with the failing address when hardware marks it valid.  All serviced
// status bits are acknowledged (written back to _ERR_STATUS_0) on exit.
//
// Returns:
//   -NVL_NOT_FOUND                 no enabled fatal interrupt is pending
//   -NVL_MORE_PROCESSING_REQUIRED  a pending bit was not recognized here
//   NVL_SUCCESS                    all pending bits were handled
//
static NvlStatus
_nvswitch_service_sourcetrack_fatal_ls10
(
    nvswitch_device *device,
    NvU32            link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, contain, unhandled;
    INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};

    // Only consider bits that are asserted in HW, enabled for fatal
    // reporting, and not masked off by the driver's sourcetrack fatal mask.
    report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link,
                            _SOURCETRACK, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link,
                            _SOURCETRACK, _ERR_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable & chip_device->intr_mask.sourcetrack.fatal;
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK, _ERR_FIRST_0);
    // NOTE(review): 'contain' is not referenced directly below; presumably the
    // NVSWITCH_REPORT_CONTAIN macro consumes it — confirm against its definition.
    contain = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK, _ERR_CONTAIN_EN_0);

    // CREQ TCEN0 crumbstore double-bit (uncorrectable) ECC error.
    bit = DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NvBool bAddressValid = NV_FALSE;
        NvU32 address = 0;
        NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK,
                _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID);

        // Capture the failing address only if HW flags it valid.
        if (FLD_TEST_DRF(_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID,
                         _VALID, _VALID, addressValid))
        {
            address = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK,
                                               _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS);
            bAddressValid = NV_TRUE;
        }

        // Snapshot address/valid registers into the report for error logging.
        report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK,
                            _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS);
        report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK,
                            _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID);
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_SOURCETRACK_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR,
                                "sourcetrack TCEN0 crumbstore DBE", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);

        // Log the uncorrectable (DBE) ECC event to the InfoROM.
        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR,
            link, bAddressValid, address, NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt
        if (report.raw_pending & DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _SOURCETRACK, _ERR_STATUS_0,
                DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, 1));
        }
    }

    // Duplicate CREQ tag detected by TCEN0.
    bit = DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _DUP_CREQ_TCEN0_TAG_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_SOURCETRACK_DUP_CREQ_TCEN0_TAG_ERR,
                                "sourcetrack duplicate CREQ", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    // Response did not match an outstanding TCEN0 request.
    bit = DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _INVALID_TCEN0_RSP_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_SOURCETRACK_INVALID_TCEN0_RSP_ERR,
                                "sourcetrack invalid TCEN0 CREQ", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    // Response did not match an outstanding TCEN1 request.
    bit = DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _INVALID_TCEN1_RSP_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_SOURCETRACK_INVALID_TCEN1_RSP_ERR,
                                "sourcetrack invalid TCEN1 CREQ", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    // Sourcetrack timeout.
    bit = DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _SOURCETRACK_TIME_OUT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_SOURCETRACK_SOURCETRACK_TIME_OUT_ERR,
                                "sourcetrack timeout error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    //
    // Disable interrupts that have occurred after fatal error.
    // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
    //
    if (device->link[link].fatal_error_occurred)
    {
        // pending is a subset of raw_enable, so XOR clears exactly those bits.
        NVSWITCH_ENG_WR32(device, NPORT, , link, _SOURCETRACK, _ERR_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
    }

    // Acknowledge the serviced FIRST bits (presumably write-1-to-clear — see HW manuals).
    if (report.raw_first & report.mask)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _SOURCETRACK, _ERR_FIRST_0,
            report.raw_first & report.mask);
    }

    // Acknowledge all serviced status bits.
    NVSWITCH_ENG_WR32(device, NPORT, , link, _SOURCETRACK, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;

}
3513 
3514 //
3515 // Multicast Tstate
3516 //
3517 
//
// Service pending non-fatal interrupts from the NPORT MULTICASTTSTATE unit
// on the given link.  Single-bit ECC threshold (LIMIT) errors on the tagpool
// and crumbstore are logged to the InfoROM — but suppressed when the matching
// DBE is also pending, since the DBE path owns that event — and crumbstore
// MCTO errors are reported with collected egress context.  Serviced status
// bits are acknowledged on exit.
//
// Returns:
//   -NVL_NOT_FOUND                 no enabled non-fatal interrupt is pending
//   -NVL_MORE_PROCESSING_REQUIRED  a pending bit was not recognized here
//   NVL_SUCCESS                    all pending bits were handled
//
static NvlStatus
_nvswitch_service_multicast_nonfatal_ls10
(
    nvswitch_device *device,
    NvU32            link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, unhandled;
    NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
    INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};

    // Only consider bits asserted in HW, enabled for non-fatal reporting,
    // and allowed by the driver's MC tstate non-fatal mask.
    report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_NON_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable & chip_device->intr_mask.mc_tstate.nonfatal;
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_FIRST_0);
    // Gather multicast timing context for the error log.
    _nvswitch_collect_error_info_ls10(device, link,
        NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_MC_TIME,
        &data);

    // Tagpool single-bit ECC threshold (correctable) error.
    bit = DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // Ignore LIMIT error if DBE is pending
        if(!(nvswitch_test_flags(report.raw_pending,
                DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_DBE_ERR, 1))))
        {
            NvBool bAddressValid = NV_FALSE;
            NvU32 address = 0;
            NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE,
                    _ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID);

            // Capture the failing address only if HW flags it valid.
            if (FLD_TEST_DRF(_MULTICASTTSTATE_ERR_TAGPOOL, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
                             addressValid))
            {
                address = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE,
                                                   _ERR_TAGPOOL_ECC_ERROR_ADDRESS);
                bAddressValid = NV_TRUE;
            }

            // Snapshot and reset the HW ECC error counter.
            report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER);
            NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER,
                DRF_DEF(_MULTICASTTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
            NVSWITCH_REPORT_NONFATAL(_HW_NPORT_MULTICASTTSTATE_TAGPOOL_ECC_LIMIT_ERR, "MC TS tag store single-bit threshold");
            NVSWITCH_REPORT_DATA(_HW_NPORT_MULTICASTTSTATE_TAGPOOL_ECC_LIMIT_ERR, data);

            // Log the correctable ECC event to the InfoROM.
            _nvswitch_construct_ecc_error_event_ls10(&err_event,
                NVSWITCH_ERR_HW_NPORT_MULTICASTTSTATE_TAGPOOL_ECC_LIMIT_ERR, link,
                bAddressValid, address, NV_FALSE, 1);

            nvswitch_inforom_ecc_log_err_event(device, &err_event);
        }

        nvswitch_clear_flags(&unhandled, bit);
    }

    // Crumbstore single-bit ECC threshold (correctable) error.
    bit = DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // Ignore LIMIT error if DBE is pending
        if(!(nvswitch_test_flags(report.raw_pending,
                DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_DBE_ERR, 1))))
        {
            NvBool bAddressValid = NV_FALSE;
            NvU32 address = 0;
            NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE,
                    _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID);

            // Capture the failing address only if HW flags it valid.
            if (FLD_TEST_DRF(_MULTICASTTSTATE_ERR_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
                             addressValid))
            {
                address = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE,
                                                   _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS);
                bAddressValid = NV_TRUE;
            }

            // Snapshot and reset the HW ECC error counter.
            report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER);
            NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER,
                DRF_DEF(_MULTICASTTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
            NVSWITCH_REPORT_NONFATAL(_HW_NPORT_MULTICASTTSTATE_CRUMBSTORE_ECC_LIMIT_ERR, "MC TS crumbstore single-bit threshold");
            NVSWITCH_REPORT_DATA(_HW_NPORT_MULTICASTTSTATE_CRUMBSTORE_ECC_LIMIT_ERR, data);

            // Log the correctable ECC event to the InfoROM.
            _nvswitch_construct_ecc_error_event_ls10(&err_event,
                NVSWITCH_ERR_HW_NPORT_MULTICASTTSTATE_CRUMBSTORE_ECC_LIMIT_ERR, link,
                bAddressValid, address, NV_FALSE, 1);

            nvswitch_inforom_ecc_log_err_event(device, &err_event);
        }

        nvswitch_clear_flags(&unhandled, bit);
    }

    // Crumbstore multicast timeout (MCTO).
    bit = DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _CRUMBSTORE_MCTO_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NPORT_MULTICASTTSTATE_CRUMBSTORE_MCTO_ERR, "MC TS crumbstore MCTO");
        // Add egress timing/misc/header context for this error.
        _nvswitch_collect_error_info_ls10(device, link,
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_TIME |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MISC |
            NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_HDR,
            &data);
        NVSWITCH_REPORT_DATA(_HW_NPORT_MULTICASTTSTATE_CRUMBSTORE_MCTO_ERR, data);

        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
    if (device->link[link].fatal_error_occurred)
    {
        // pending is a subset of raw_enable, so XOR clears exactly those bits.
        NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_NON_FATAL_REPORT_EN_0,
            report.raw_enable ^ pending);
    }

    // Acknowledge the serviced FIRST bits (presumably write-1-to-clear — see HW manuals).
    if (report.raw_first & report.mask)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_FIRST_0,
            report.raw_first & report.mask);
    }

    // Acknowledge all serviced status bits.
    NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
3658 
//
// Service pending fatal interrupts from the NPORT MULTICASTTSTATE unit on
// the given link.  Double-bit ECC errors on the tagpool and crumbstore are
// reported via the CONTAIN path and logged to the InfoROM (clearing any
// now-redundant LIMIT_ERR), and crumbstore buffer overwrite is reported.
// Serviced status bits are acknowledged on exit.
//
// Returns:
//   -NVL_NOT_FOUND                 no enabled fatal interrupt is pending
//   -NVL_MORE_PROCESSING_REQUIRED  a pending bit was not recognized here
//   NVL_SUCCESS                    all pending bits were handled
//
static NvlStatus
_nvswitch_service_multicast_fatal_ls10
(
    nvswitch_device *device,
    NvU32            link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, contain, unhandled;
    NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
    INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};

    // Only consider bits asserted in HW, enabled for fatal reporting,
    // and allowed by the driver's MC tstate fatal mask.
    report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable & chip_device->intr_mask.mc_tstate.fatal;
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_FIRST_0);
    // NOTE(review): 'contain' is not referenced directly below; presumably the
    // NVSWITCH_REPORT_CONTAIN macro consumes it — confirm against its definition.
    contain = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_CONTAIN_EN_0);
    // Gather multicast timing context for the error log.
    _nvswitch_collect_error_info_ls10(device, link,
        NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_MC_TIME,
        &data);

    // Tagpool double-bit (uncorrectable) ECC error.
    bit = DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NvBool bAddressValid = NV_FALSE;
        NvU32 address = 0;
        NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE,
                _ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID);

        // Capture the failing address only if HW flags it valid.
        if (FLD_TEST_DRF(_MULTICASTTSTATE_ERR_TAGPOOL, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
                         addressValid))
        {
            address = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE,
                                               _ERR_TAGPOOL_ECC_ERROR_ADDRESS);
            bAddressValid = NV_TRUE;
        }

        // Snapshot and reset the HW ECC error counter.
        report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER);
        NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER,
            DRF_DEF(_MULTICASTTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_MULTICASTTSTATE_TAGPOOL_ECC_DBE_ERR, "MC TS tag store fatal ECC", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_MULTICASTTSTATE_TAGPOOL_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        // Log the uncorrectable (DBE) ECC event to the InfoROM.
        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_MULTICASTTSTATE_TAGPOOL_ECC_DBE_ERR, link, bAddressValid,
            address, NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt
        if (report.raw_pending & DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_STATUS_0,
                DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1));
        }
    }

    // Crumbstore double-bit (uncorrectable) ECC error.
    bit = DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NvBool bAddressValid = NV_FALSE;
        NvU32 address = 0;
        NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE,
                _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID);

        // Capture the failing address only if HW flags it valid.
        if (FLD_TEST_DRF(_MULTICASTTSTATE_ERR_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
                         addressValid))
        {
            address = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE,
                                               _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS);
            bAddressValid = NV_TRUE;
        }

        // Snapshot and reset the HW ECC error counter.
        report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER);
        NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER,
            DRF_DEF(_MULTICASTTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_MULTICASTTSTATE_CRUMBSTORE_ECC_DBE_ERR, "MC TS crumbstore fatal ECC", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_MULTICASTTSTATE_CRUMBSTORE_ECC_DBE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);

        // Log the uncorrectable (DBE) ECC event to the InfoROM.
        _nvswitch_construct_ecc_error_event_ls10(&err_event,
            NVSWITCH_ERR_HW_NPORT_MULTICASTTSTATE_CRUMBSTORE_ECC_DBE_ERR, link, bAddressValid,
            address, NV_TRUE, 1);

        nvswitch_inforom_ecc_log_err_event(device, &err_event);

        // Clear associated LIMIT_ERR interrupt
        if (report.raw_pending & DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1))
        {
            NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_STATUS_0,
                DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1));
        }
    }

    // Crumbstore buffer overwrite.
    bit = DRF_NUM(_MULTICASTTSTATE, _ERR_STATUS_0, _CRUMBSTORE_BUF_OVERWRITE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_CONTAIN(_HW_NPORT_MULTICASTTSTATE_CRUMBSTORE_BUF_OVERWRITE_ERR, "MC crumbstore overwrite", NV_FALSE);
        NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_MULTICASTTSTATE_CRUMBSTORE_BUF_OVERWRITE_ERR, data);
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
    if (device->link[link].fatal_error_occurred)
    {
        // pending is a subset of raw_enable, so XOR clears exactly those bits.
        NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
    }

    // Acknowledge the serviced FIRST bits (presumably write-1-to-clear — see HW manuals).
    if (report.raw_first & report.mask)
    {
        NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_FIRST_0,
            report.raw_first & report.mask);
    }

    // Acknowledge all serviced status bits.
    NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
3796 
3797 //
3798 // Reduction Tstate
3799 //
3800 
3801 static NvlStatus
3802 _nvswitch_service_reduction_nonfatal_ls10
3803 (
3804     nvswitch_device *device,
3805     NvU32            link
3806 )
3807 {
3808     ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
3809     NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
3810     NvU32 pending, bit, unhandled;
3811     NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
3812     INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
3813 
3814     report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_STATUS_0);
3815     report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_NON_FATAL_REPORT_EN_0);
3816     report.mask = report.raw_enable & chip_device->intr_mask.mc_tstate.nonfatal;
3817     pending = report.raw_pending & report.mask;
3818 
3819     if (pending == 0)
3820     {
3821         return -NVL_NOT_FOUND;
3822     }
3823 
3824     unhandled = pending;
3825     report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_FIRST_0);
3826     _nvswitch_collect_error_info_ls10(device, link,
3827         NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_RED_TIME,
3828         &data);
3829 
3830     bit = DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1);
3831     if (nvswitch_test_flags(pending, bit))
3832     {
3833         // Ignore LIMIT error if DBE is pending
3834         if(!(nvswitch_test_flags(report.raw_pending,
3835                 DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_DBE_ERR, 1))))
3836         {
3837             NvBool bAddressValid = NV_FALSE;
3838             NvU32 address = 0;
3839             NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE,
3840                     _ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID);
3841 
3842             if (FLD_TEST_DRF(_REDUCTIONTSTATE_ERR_TAGPOOL, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
3843                              addressValid))
3844             {
3845                 address = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE,
3846                                                    _ERR_TAGPOOL_ECC_ERROR_ADDRESS);
3847                 bAddressValid = NV_TRUE;
3848             }
3849 
3850             report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER);
3851             NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER,
3852                 DRF_DEF(_REDUCTIONTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
3853             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_REDUCTIONTSTATE_TAGPOOL_ECC_LIMIT_ERR, "Red TS tag store single-bit threshold");
3854             NVSWITCH_REPORT_DATA(_HW_NPORT_REDUCTIONTSTATE_TAGPOOL_ECC_LIMIT_ERR, data);
3855 
3856             _nvswitch_construct_ecc_error_event_ls10(&err_event,
3857                 NVSWITCH_ERR_HW_NPORT_REDUCTIONTSTATE_TAGPOOL_ECC_LIMIT_ERR, link,
3858                 bAddressValid, address, NV_FALSE, 1);
3859 
3860             nvswitch_inforom_ecc_log_err_event(device, &err_event);
3861         }
3862 
3863         nvswitch_clear_flags(&unhandled, bit);
3864     }
3865 
3866     bit = DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1);
3867     if (nvswitch_test_flags(pending, bit))
3868     {
3869         // Ignore LIMIT error if DBE is pending
3870         if(!(nvswitch_test_flags(report.raw_pending,
3871                 DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_DBE_ERR, 1))))
3872         {
3873             NvBool bAddressValid = NV_FALSE;
3874             NvU32 address = 0;
3875             NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE,
3876                     _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID);
3877 
3878             if (FLD_TEST_DRF(_REDUCTIONTSTATE_ERR_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
3879                              addressValid))
3880             {
3881                 address = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE,
3882                                                    _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS);
3883                 bAddressValid = NV_TRUE;
3884             }
3885 
3886             report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER);
3887             NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER,
3888                 DRF_DEF(_REDUCTIONTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
3889             NVSWITCH_REPORT_NONFATAL(_HW_NPORT_REDUCTIONTSTATE_CRUMBSTORE_ECC_LIMIT_ERR, "Red TS crumbstore single-bit threshold");
3890             NVSWITCH_REPORT_DATA(_HW_NPORT_REDUCTIONTSTATE_CRUMBSTORE_ECC_LIMIT_ERR, data);
3891 
3892             _nvswitch_construct_ecc_error_event_ls10(&err_event,
3893                 NVSWITCH_ERR_HW_NPORT_REDUCTIONTSTATE_CRUMBSTORE_ECC_LIMIT_ERR, link,
3894                 bAddressValid, address, NV_FALSE, 1);
3895 
3896             nvswitch_inforom_ecc_log_err_event(device, &err_event);
3897         }
3898 
3899         nvswitch_clear_flags(&unhandled, bit);
3900     }
3901 
3902     bit = DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _CRUMBSTORE_RTO_ERR, 1);
3903     if (nvswitch_test_flags(pending, bit))
3904     {
3905         NVSWITCH_REPORT_NONFATAL(_HW_NPORT_REDUCTIONTSTATE_CRUMBSTORE_RTO_ERR, "Red TS crumbstore RTO");
3906         NVSWITCH_REPORT_DATA(_HW_NPORT_REDUCTIONTSTATE_CRUMBSTORE_RTO_ERR, data);
3907 
3908         nvswitch_clear_flags(&unhandled, bit);
3909     }
3910 
3911     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
3912 
3913     // Disable interrupts that have occurred after fatal error.
3914     // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
3915     if (device->link[link].fatal_error_occurred)
3916     {
3917         NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_NON_FATAL_REPORT_EN_0,
3918             report.raw_enable ^ pending);
3919     }
3920 
3921     if (report.raw_first & report.mask)
3922     {
3923         NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_FIRST_0,
3924             report.raw_first & report.mask);
3925     }
3926 
3927     NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_STATUS_0, pending);
3928 
3929     if (unhandled != 0)
3930     {
3931         return -NVL_MORE_PROCESSING_REQUIRED;
3932     }
3933 
3934     return NVL_SUCCESS;
3935 }
3936 
3937 static NvlStatus
3938 _nvswitch_service_reduction_fatal_ls10
3939 (
3940     nvswitch_device *device,
3941     NvU32            link
3942 )
3943 {
3944     ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
3945     NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
3946     NvU32 pending, bit, contain, unhandled;
3947     NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
3948     INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
3949 
3950     report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_STATUS_0);
3951     report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_FATAL_REPORT_EN_0);
3952     report.mask = report.raw_enable & chip_device->intr_mask.mc_tstate.fatal;
3953     pending = report.raw_pending & report.mask;
3954 
3955     if (pending == 0)
3956     {
3957         return -NVL_NOT_FOUND;
3958     }
3959 
3960     unhandled = pending;
3961     report.raw_first = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_FIRST_0);
3962     contain = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_CONTAIN_EN_0);
3963     _nvswitch_collect_error_info_ls10(device, link,
3964         NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_RED_TIME,
3965         &data);
3966 
3967     bit = DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_DBE_ERR, 1);
3968     if (nvswitch_test_flags(pending, bit))
3969     {
3970         NvBool bAddressValid = NV_FALSE;
3971         NvU32 address = 0;
3972         NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE,
3973                 _ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID);
3974 
3975         if (FLD_TEST_DRF(_REDUCTIONTSTATE_ERR_TAGPOOL, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
3976                          addressValid))
3977         {
3978             address = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE,
3979                                                _ERR_TAGPOOL_ECC_ERROR_ADDRESS);
3980             bAddressValid = NV_TRUE;
3981         }
3982 
3983         report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER);
3984         NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER,
3985             DRF_DEF(_REDUCTIONTSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
3986         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_REDUCTIONTSTATE_TAGPOOL_ECC_DBE_ERR, "Red TS tag store fatal ECC", NV_FALSE);
3987         _nvswitch_collect_error_info_ls10(device, link,
3988             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_TIME |
3989             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_MISC |
3990             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_EGRESS_HDR,
3991             &data);
3992         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_REDUCTIONTSTATE_TAGPOOL_ECC_DBE_ERR, data);
3993         nvswitch_clear_flags(&unhandled, bit);
3994 
3995         _nvswitch_construct_ecc_error_event_ls10(&err_event,
3996             NVSWITCH_ERR_HW_NPORT_REDUCTIONTSTATE_TAGPOOL_ECC_DBE_ERR, link, bAddressValid,
3997             address, NV_TRUE, 1);
3998 
3999         nvswitch_inforom_ecc_log_err_event(device, &err_event);
4000 
4001         // Clear associated LIMIT_ERR interrupt
4002         if (report.raw_pending & DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1))
4003         {
4004             NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_STATUS_0,
4005                 DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1));
4006         }
4007     }
4008 
4009     bit = DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_DBE_ERR, 1);
4010     if (nvswitch_test_flags(pending, bit))
4011     {
4012         NvBool bAddressValid = NV_FALSE;
4013         NvU32 address = 0;
4014         NvU32 addressValid = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE,
4015                 _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID);
4016 
4017         if (FLD_TEST_DRF(_REDUCTIONTSTATE_ERR_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID,
4018                          addressValid))
4019         {
4020             address = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE,
4021                                                _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS);
4022             bAddressValid = NV_TRUE;
4023         }
4024 
4025         report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER);
4026         NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER,
4027             DRF_DEF(_REDUCTIONTSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT));
4028         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_REDUCTIONTSTATE_CRUMBSTORE_ECC_DBE_ERR, "Red TS crumbstore fatal ECC", NV_FALSE);
4029         _nvswitch_collect_error_info_ls10(device, link,
4030             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_TIME |
4031             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_MISC |
4032             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_HDR,
4033             &data);
4034         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_REDUCTIONTSTATE_CRUMBSTORE_ECC_DBE_ERR, data);
4035         nvswitch_clear_flags(&unhandled, bit);
4036 
4037         _nvswitch_construct_ecc_error_event_ls10(&err_event,
4038             NVSWITCH_ERR_HW_NPORT_REDUCTIONTSTATE_CRUMBSTORE_ECC_DBE_ERR, link, bAddressValid,
4039             address, NV_TRUE, 1);
4040 
4041         nvswitch_inforom_ecc_log_err_event(device, &err_event);
4042 
4043         // Clear associated LIMIT_ERR interrupt
4044         if (report.raw_pending & DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1))
4045         {
4046             NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_STATUS_0,
4047                 DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1));
4048         }
4049     }
4050 
4051     bit = DRF_NUM(_REDUCTIONTSTATE, _ERR_STATUS_0, _CRUMBSTORE_BUF_OVERWRITE_ERR, 1);
4052     if (nvswitch_test_flags(pending, bit))
4053     {
4054         NVSWITCH_REPORT_CONTAIN(_HW_NPORT_REDUCTIONTSTATE_CRUMBSTORE_BUF_OVERWRITE_ERR, "Red crumbstore overwrite", NV_FALSE);
4055         _nvswitch_collect_error_info_ls10(device, link,
4056             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_TIME |
4057             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_MISC |
4058             NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_INGRESS_HDR,
4059             &data);
4060         NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_REDUCTIONTSTATE_CRUMBSTORE_BUF_OVERWRITE_ERR, data);
4061         nvswitch_clear_flags(&unhandled, bit);
4062     }
4063 
4064     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
4065 
4066     // Disable interrupts that have occurred after fatal error.
4067     // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
4068     if (device->link[link].fatal_error_occurred)
4069     {
4070         NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_FATAL_REPORT_EN_0,
4071                 report.raw_enable ^ pending);
4072     }
4073 
4074     if (report.raw_first & report.mask)
4075     {
4076         NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_FIRST_0,
4077             report.raw_first & report.mask);
4078     }
4079 
4080     NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_STATUS_0, pending);
4081 
4082     if (unhandled != 0)
4083     {
4084         return -NVL_MORE_PROCESSING_REQUIRED;
4085     }
4086 
4087     return NVL_SUCCESS;
4088 }
4089 
4090 static NvlStatus
4091 _nvswitch_service_nport_fatal_ls10
4092 (
4093     nvswitch_device *device,
4094     NvU32            link
4095 )
4096 {
4097     NvlStatus status[7];
4098 
4099     status[0] = _nvswitch_service_route_fatal_ls10(device, link);
4100     status[1] = _nvswitch_service_ingress_fatal_ls10(device, link);
4101     status[2] = _nvswitch_service_egress_fatal_ls10(device, link);
4102     status[3] = _nvswitch_service_tstate_fatal_ls10(device, link);
4103     status[4] = _nvswitch_service_sourcetrack_fatal_ls10(device, link);
4104     status[5] = _nvswitch_service_multicast_fatal_ls10(device, link);
4105     status[6] = _nvswitch_service_reduction_fatal_ls10(device, link);
4106 
4107     if ((status[0] != NVL_SUCCESS) &&
4108         (status[1] != NVL_SUCCESS) &&
4109         (status[2] != NVL_SUCCESS) &&
4110         (status[3] != NVL_SUCCESS) &&
4111         (status[4] != NVL_SUCCESS) &&
4112         (status[5] != NVL_SUCCESS) &&
4113         (status[6] != NVL_SUCCESS))
4114     {
4115         return -NVL_MORE_PROCESSING_REQUIRED;
4116     }
4117 
4118     return NVL_SUCCESS;
4119 }
4120 
4121 static NvlStatus
4122 _nvswitch_service_npg_fatal_ls10
4123 (
4124     nvswitch_device *device,
4125     NvU32            npg
4126 )
4127 {
4128     NvU32 pending, mask, bit, unhandled;
4129     NvU32 nport;
4130     NvU32 link;
4131 
4132     pending = NVSWITCH_ENG_RD32(device, NPG, , npg, _NPG, _NPG_INTERRUPT_STATUS);
4133 
4134     if (pending == 0)
4135     {
4136         return -NVL_NOT_FOUND;
4137     }
4138 
4139     mask =
4140         DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _FATAL) |
4141         DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _FATAL) |
4142         DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _FATAL) |
4143         DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV3_INT_STATUS, _FATAL);
4144     pending &= mask;
4145     unhandled = pending;
4146 
4147     for (nport = 0; nport < NVSWITCH_NPORT_PER_NPG_LS10; nport++)
4148     {
4149         switch (nport)
4150         {
4151             case 0:
4152                 bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _FATAL);
4153                 break;
4154             case 1:
4155                 bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _FATAL);
4156                 break;
4157             case 2:
4158                 bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _FATAL);
4159                 break;
4160             case 3:
4161                 bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV3_INT_STATUS, _FATAL);
4162                 break;
4163         }
4164         if (nvswitch_test_flags(pending, bit))
4165         {
4166             link = NPORT_TO_LINK_LS10(device, npg, nport);
4167             if (NVSWITCH_ENG_IS_VALID(device, NPORT, link))
4168             {
4169                 if (_nvswitch_service_nport_fatal_ls10(device, link) == NVL_SUCCESS)
4170                 {
4171                     nvswitch_clear_flags(&unhandled, bit);
4172                 }
4173             }
4174         }
4175     }
4176 
4177     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
4178 
4179     if (unhandled != 0)
4180     {
4181         return -NVL_MORE_PROCESSING_REQUIRED;
4182     }
4183 
4184     return NVL_SUCCESS;
4185 }
4186 
4187 static NvlStatus
4188 _nvswitch_service_nport_nonfatal_ls10
4189 (
4190     nvswitch_device *device,
4191     NvU32            link
4192 )
4193 {
4194     NvlStatus status[7];
4195 
4196     status[0] = _nvswitch_service_route_nonfatal_ls10(device, link);
4197     status[1] = _nvswitch_service_ingress_nonfatal_ls10(device, link);
4198     status[2] = _nvswitch_service_egress_nonfatal_ls10(device, link);
4199     status[3] = _nvswitch_service_tstate_nonfatal_ls10(device, link);
4200     status[4] = _nvswitch_service_sourcetrack_nonfatal_ls10(device, link);
4201     status[5] = _nvswitch_service_multicast_nonfatal_ls10(device, link);
4202     status[6] = _nvswitch_service_reduction_nonfatal_ls10(device, link);
4203 
4204     if ((status[0] != NVL_SUCCESS) &&
4205         (status[1] != NVL_SUCCESS) &&
4206         (status[2] != NVL_SUCCESS) &&
4207         (status[3] != NVL_SUCCESS) &&
4208         (status[4] != NVL_SUCCESS) &&
4209         (status[5] != NVL_SUCCESS) &&
4210         (status[6] != NVL_SUCCESS))
4211     {
4212         return -NVL_MORE_PROCESSING_REQUIRED;
4213     }
4214 
4215     return NVL_SUCCESS;
4216 }
4217 
4218 static NvlStatus
4219 _nvswitch_service_npg_nonfatal_ls10
4220 (
4221     nvswitch_device *device,
4222     NvU32 npg
4223 )
4224 {
4225     NvU32 pending, mask, bit, unhandled;
4226     NvU32 nport;
4227     NvU32 link;
4228 
4229     pending = NVSWITCH_ENG_RD32(device, NPG, , npg, _NPG, _NPG_INTERRUPT_STATUS);
4230 
4231     if (pending == 0)
4232     {
4233         return -NVL_NOT_FOUND;
4234     }
4235 
4236     mask =
4237         DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _NONFATAL) |
4238         DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _NONFATAL) |
4239         DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _NONFATAL) |
4240         DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV3_INT_STATUS, _NONFATAL);
4241     pending &= mask;
4242     unhandled = pending;
4243 
4244     for (nport = 0; nport < NVSWITCH_NPORT_PER_NPG_LS10; nport++)
4245     {
4246         switch (nport)
4247         {
4248             case 0:
4249                 bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _NONFATAL);
4250                 break;
4251             case 1:
4252                 bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _NONFATAL);
4253                 break;
4254             case 2:
4255                 bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _NONFATAL);
4256                 break;
4257             case 3:
4258                 bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV3_INT_STATUS, _NONFATAL);
4259                 break;
4260         }
4261         if (nvswitch_test_flags(pending, bit))
4262         {
4263             link = NPORT_TO_LINK_LS10(device, npg, nport);
4264             if (NVSWITCH_ENG_IS_VALID(device, NPORT, link))
4265             {
4266                 if (_nvswitch_service_nport_nonfatal_ls10(device, link) == NVL_SUCCESS)
4267                 {
4268                     nvswitch_clear_flags(&unhandled, bit);
4269                 }
4270             }
4271         }
4272     }
4273 
4274     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
4275 
4276     if (unhandled != 0)
4277     {
4278         return -NVL_MORE_PROCESSING_REQUIRED;
4279     }
4280 
4281     return NVL_SUCCESS;
4282 }
4283 
//
// Service fatal NVLDL interrupts for every enabled link owned by the
// given NVLIPT instance.
//
// Each serviced link is recorded in runtimeErrorMask, which is then
// forwarded to SMBPBI as the set of links with runtime errors.
//
// Returns NVL_SUCCESS if at least one link was serviced, otherwise
// -NVL_MORE_PROCESSING_REQUIRED.
//
static NvlStatus
_nvswitch_service_nvldl_fatal_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance
)
{
    NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask, runtimeErrorMask = 0;
    NvU32 i;
    nvlink_link *link;
    NvlStatus status = -NVL_MORE_PROCESSING_REQUIRED;
    NVSWITCH_LINK_TRAINING_ERROR_INFO linkTrainingErrorInfo = { 0 };
    NVSWITCH_LINK_RUNTIME_ERROR_INFO linkRuntimeErrorInfo = { 0 };

    // Restrict servicing to enabled links that are local to this NVLIPT
    // instance.
    enabledLinkMask = nvswitch_get_enabled_link_mask(device);
    localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance);
    localEnabledLinkMask = enabledLinkMask & localLinkMask;

    FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask)
    {
        link = nvswitch_get_link(device, i);
        if (link == NULL)
        {
            // An interrupt on an invalid link should never occur
            NVSWITCH_ASSERT(link != NULL);
            continue;
        }

        if (NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT) != nvlipt_instance)
        {
            NVSWITCH_ASSERT(0);
            break;
        }

        // Links held in reset are skipped; they cannot be serviced.
        if (nvswitch_is_link_in_reset(device, link))
        {
            continue;
        }

        if (device->hal.nvswitch_service_nvldl_fatal_link(device, nvlipt_instance, i) == NVL_SUCCESS)
        {
            runtimeErrorMask |= NVBIT64(i);
            status = NVL_SUCCESS;
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    // Only runtime (not training) error info is reported from this path.
    linkTrainingErrorInfo.isValid = NV_FALSE;
    linkRuntimeErrorInfo.isValid  = NV_TRUE;
    linkRuntimeErrorInfo.mask0    = runtimeErrorMask;

    // Check runtimeErrorMask is non-zero before consuming it further.
    if ((runtimeErrorMask != 0) &&
        (nvswitch_smbpbi_set_link_error_info(device,
            &linkTrainingErrorInfo, &linkRuntimeErrorInfo) != NVL_SUCCESS))
    {
        NVSWITCH_PRINT(device, ERROR,
                       "%s: NVLDL[0x%x, 0x%llx]: Unable to send Runtime Error bitmask: 0x%llx,\n",
                       __FUNCTION__,
                       nvlipt_instance, localLinkMask,
                       runtimeErrorMask);
    }

    return status;
}
4349 
//
// Service fatal interrupts reported by the NVLTLC TX_SYS unit for one link.
//
// Returns NVL_SUCCESS if all pending, enabled error bits were recognized
// and cleared, -NVL_NOT_FOUND if none were pending, and
// -NVL_MORE_PROCESSING_REQUIRED if any unrecognized bit remained.
//
static NvlStatus
_nvswitch_service_nvltlc_tx_sys_fatal_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    NvU32 pending, bit, unhandled;
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };

    // Only service error bits that are both pending and enabled for
    // fatal reporting.
    report.raw_pending = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable;
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_FIRST_0);

    // Report each recognized error and drop its bit from 'unhandled'.
    bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _NCISOC_PARITY_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_NCISOC_PARITY_ERR, "NCISOC Parity Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _NCISOC_HDR_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_NCISOC_HDR_ECC_DBE_ERR, "NCISOC HDR ECC DBE Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _NCISOC_DAT_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_NCISOC_DAT_ECC_DBE_ERR, "NCISOC DAT ECC DBE Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _NCISOC_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_NCISOC_ECC_LIMIT_ERR, "NCISOC ECC Limit Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _TXPOISONDET, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TXPOISONDET, "Poison Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _TXRSPSTATUS_HW_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_TXRSPSTATUS_HW_ERR, "TX Response Status HW Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _TXRSPSTATUS_UR_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_TXRSPSTATUS_UR_ERR, "TX Response Status UR Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _TXRSPSTATUS_PRIV_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_TXRSPSTATUS_PRIV_ERR, "TX Response Status PRIV Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    // Clear the first-error latch for the serviced bits.
    if (report.raw_first & report.mask)
    {
        NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_FIRST_0,
            report.raw_first & report.mask);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // (pending is a subset of raw_enable, so XOR clears exactly those bits.)
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
    }

    NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLTLC_TX_SYS interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, link, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
4457 
//
// Service fatal interrupts reported by the NVLTLC RX_SYS unit for one link.
//
// Returns NVL_SUCCESS if all pending, enabled error bits were recognized
// and cleared, -NVL_NOT_FOUND if none were pending, and
// -NVL_MORE_PROCESSING_REQUIRED if any unrecognized bit remained.
//
static NvlStatus
_nvswitch_service_nvltlc_rx_sys_fatal_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    NvU32 pending, bit, unhandled;
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };

    // Only service error bits that are both pending and enabled for
    // fatal reporting.
    report.raw_pending = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable;
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_FIRST_0);

    // Report each recognized error and drop its bit from 'unhandled'.
    bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _NCISOC_PARITY_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_NCISOC_PARITY_ERR, "NCISOC Parity Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _HDR_RAM_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_HDR_RAM_ECC_DBE_ERR, "HDR RAM ECC DBE Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _HDR_RAM_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_HDR_RAM_ECC_LIMIT_ERR, "HDR RAM ECC Limit Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _DAT0_RAM_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_DAT0_RAM_ECC_DBE_ERR, "DAT0 RAM ECC DBE Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _DAT0_RAM_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_DAT0_RAM_ECC_LIMIT_ERR, "DAT0 RAM ECC Limit Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _DAT1_RAM_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_DAT1_RAM_ECC_DBE_ERR, "DAT1 RAM ECC DBE Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _DAT1_RAM_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_DAT1_RAM_ECC_LIMIT_ERR, "DAT1 RAM ECC Limit Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    // Clear the first-error latch for the serviced bits.
    if (report.raw_first & report.mask)
    {
        NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_FIRST_0,
            report.raw_first & report.mask);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // (pending is a subset of raw_enable, so XOR clears exactly those bits.)
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
    }

    NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLTLC_RX_SYS interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, link, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
4558 
//
// Service fatal interrupts reported in NVLTLC TX_LNK error status
// register 0 for one link.
//
// Returns NVL_SUCCESS if all pending, enabled error bits were recognized
// and cleared, -NVL_NOT_FOUND if none were pending, and
// -NVL_MORE_PROCESSING_REQUIRED if any unrecognized bit remained.
//
static NvlStatus
_nvswitch_service_nvltlc_tx_lnk_fatal_0_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    NvU32 pending, bit, unhandled;
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };

    // Only service error bits that are both pending and enabled for
    // fatal reporting.
    report.raw_pending = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable;
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_0);

    // Report each recognized error and drop its bit from 'unhandled'.
    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _TXDLCREDITPARITYERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TXDLCREDITPARITYERR, "TX DL Credit Parity Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _CREQ_RAM_HDR_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_LNK_CREQ_RAM_HDR_ECC_DBE_ERR, "CREQ RAM HDR ECC DBE Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP_RAM_HDR_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_LNK_RSP_RAM_HDR_ECC_DBE_ERR, "Response RAM HDR ECC DBE Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _COM_RAM_HDR_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_LNK_COM_RAM_HDR_ECC_DBE_ERR, "COM RAM HDR ECC DBE Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP1_RAM_HDR_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_LNK_RSP1_RAM_HDR_ECC_DBE_ERR, "RSP1 RAM HDR ECC DBE Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP1_RAM_DAT_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_LNK_RSP1_RAM_DAT_ECC_DBE_ERR, "RSP1 RAM DAT ECC DBE Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // (pending is a subset of raw_enable, so XOR clears exactly those bits.)
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
    }

    // Clear the first-error latch for the serviced bits, then the status.
    if (report.raw_first & report.mask)
    {
        NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_0,
                report.raw_first & report.mask);
    }
    NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLTLC_TX_LNK _0 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, link, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
4651 
//
// Service fatal interrupts reported in NVLTLC RX_LNK error status
// register 0 for one link.
//
// Returns NVL_SUCCESS if all pending, enabled error bits were recognized
// and cleared, -NVL_NOT_FOUND if none were pending, and
// -NVL_MORE_PROCESSING_REQUIRED if any unrecognized bit remained.
//
static NvlStatus
_nvswitch_service_nvltlc_rx_lnk_fatal_0_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    NvU32 pending, bit, unhandled;
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };

    // Only service error bits that are both pending and enabled for
    // fatal reporting.
    report.raw_pending = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable;
    pending = report.raw_pending & report.mask;
    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_0);

    // Report each recognized error and drop its bit from 'unhandled'.
    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXDLHDRPARITYERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXDLHDRPARITYERR, "RX DL HDR Parity Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXDLDATAPARITYERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXDLDATAPARITYERR, "RX DL Data Parity Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXDLCTRLPARITYERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXDLCTRLPARITYERR, "RX DL Ctrl Parity Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXPKTLENERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXPKTLENERR, "RX Packet Length Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RSVCACHEATTRPROBEREQERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RSVCACHEATTRPROBEREQERR, "RSV Packet Status Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RSVCACHEATTRPROBERSPERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RSVCACHEATTRPROBERSPERR, "RSV CacheAttr Probe Rsp Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _DATLENGTRMWREQMAXERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_DATLENGTRMWREQMAXERR, "Data Length RMW Req Max Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _DATLENLTATRRSPMINERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_DATLENLTATRRSPMINERR, "Data Len Lt ATR RSP Min Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _INVALIDCACHEATTRPOERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_INVALIDCACHEATTRPOERR, "Invalid Cache Attr PO Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXRSPSTATUS_HW_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_LNK_RXRSPSTATUS_HW_ERR, "RX Rsp Status HW Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXRSPSTATUS_UR_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_LNK_RXRSPSTATUS_UR_ERR, "RX Rsp Status UR Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _INVALID_COLLAPSED_RESPONSE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_LNK_INVALID_COLLAPSED_RESPONSE_ERR, "Invalid Collapsed Response Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // (pending is a subset of raw_enable, so XOR clears exactly those bits.)
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
    }

    // Clear the first-error latch for the serviced bits, then the status.
    if (report.raw_first & report.mask)
    {
        NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_0,
                report.raw_first & report.mask);
    }
    NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLTLC_RX_LNK _0 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, link, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
4785 
//
// Service fatal interrupts reported in NVLTLC RX_LNK error status
// register 1 for one link.
//
// Returns NVL_SUCCESS if all pending, enabled error bits were recognized
// and cleared, -NVL_NOT_FOUND if none were pending, and
// -NVL_MORE_PROCESSING_REQUIRED if any unrecognized bit remained.
//
static NvlStatus
_nvswitch_service_nvltlc_rx_lnk_fatal_1_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    NvU32 pending, bit, unhandled;
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };

    // Only service error bits that are both pending and enabled for
    // fatal reporting.
    report.raw_pending = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_1);
    report.raw_enable = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FATAL_REPORT_EN_1);
    report.mask = report.raw_enable;
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_LINK_RD32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_1);

    // Report each recognized error and drop its bit from 'unhandled'.
    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_1, _RXHDROVFERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXHDROVFERR, "RX HDR OVF Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_1, _RXDATAOVFERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXDATAOVFERR, "RX Data OVF Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_1, _STOMPDETERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_STOMPDETERR, "Stomp Det Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_1, _RXPOISONERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXPOISONERR, "RX Poison Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // (pending is a subset of raw_enable, so XOR clears exactly those bits.)
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FATAL_REPORT_EN_1,
                report.raw_enable ^ pending);
    }

    // Clear the first-error latch for the serviced bits, then the status.
    if (report.raw_first & report.mask)
    {
        NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_1,
                report.raw_first & report.mask);
    }
    NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_1, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLTLC_RX_LNK _1 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, link, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
4864 
4865 static NvBool
4866 _nvswitch_is_ncisoc_clock_off_ls10
4867 (
4868     nvswitch_device *device,
4869     nvlink_link     *link
4870 )
4871 {
4872     NvU32 clkStatus;
4873     clkStatus = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber,
4874             NVLIPT_LNK, _NVLIPT_LNK, _CTRL_CLK_CTRL);
4875     if(FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_CLK_CTRL, _NCISOCCLK_STS, _OFF, clkStatus))
4876     {
4877         return NV_TRUE;
4878     }
4879     return NV_FALSE;
4880 }
4881 
//
// Service fatal NVLTLC interrupts for every enabled link owned by the
// given NVLIPT instance, covering the TX_SYS, RX_SYS, TX_LNK and RX_LNK
// error status registers.
//
// Returns NVL_SUCCESS if at least one sub-unit of any link was serviced,
// otherwise -NVL_MORE_PROCESSING_REQUIRED.
//
NvlStatus
_nvswitch_service_nvltlc_fatal_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance
)
{
    NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask;
    NvU32 i;
    nvlink_link *link;
    NvlStatus status = -NVL_MORE_PROCESSING_REQUIRED;

    // Restrict servicing to enabled links that are local to this NVLIPT
    // instance.
    enabledLinkMask = nvswitch_get_enabled_link_mask(device);
    localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance);
    localEnabledLinkMask = enabledLinkMask & localLinkMask;

    FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask)
    {
        link = nvswitch_get_link(device, i);
        if (link == NULL)
        {
            // An interrupt on an invalid link should never occur
            NVSWITCH_ASSERT(link != NULL);
            continue;
        }

        if (NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT) != nvlipt_instance)
        {
            NVSWITCH_ASSERT(0);
            break;
        }

        //
        // If link is in reset or NCISOC clock is off then
        // don't need to check the link for NVLTLC errors
        // as the IP's registers are off
        //
        if (nvswitch_is_link_in_reset(device, link) ||
            _nvswitch_is_ncisoc_clock_off_ls10(device, link))
        {
            continue;
        }

        // Service each NVLTLC sub-unit; success by any one of them means
        // this instance's interrupt was handled.
        if (_nvswitch_service_nvltlc_tx_sys_fatal_ls10(device, nvlipt_instance, i) == NVL_SUCCESS)
        {
            status = NVL_SUCCESS;
        }

        if (_nvswitch_service_nvltlc_rx_sys_fatal_ls10(device, nvlipt_instance, i) == NVL_SUCCESS)
        {
            status = NVL_SUCCESS;
        }

        if (_nvswitch_service_nvltlc_tx_lnk_fatal_0_ls10(device, nvlipt_instance, i) == NVL_SUCCESS)
        {
            status = NVL_SUCCESS;
        }

        if (_nvswitch_service_nvltlc_rx_lnk_fatal_0_ls10(device, nvlipt_instance, i) == NVL_SUCCESS)
        {
            status = NVL_SUCCESS;
        }

        if (_nvswitch_service_nvltlc_rx_lnk_fatal_1_ls10(device, nvlipt_instance, i) == NVL_SUCCESS)
        {
            status = NVL_SUCCESS;
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return status;
}
4954 
//
// Service fatal interrupts in the NVLIPT common (instance-wide, not per-link)
// register space.  Only the CLKCTL_ILLEGAL_REQUEST error is handled here.
//
// Returns:
//   -NVL_NOT_FOUND                 no enabled, handled interrupt is pending
//   -NVL_MORE_PROCESSING_REQUIRED  an unhandled bit remained set
//   NVL_SUCCESS                    all pending bits were handled and cleared
//
static NvlStatus
_nvswitch_service_nvlipt_common_fatal_ls10
(
    nvswitch_device *device,
    NvU32 instance
)
{
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, contain, unhandled;
    NvU32 link, local_link_idx;

    // Consider only status bits that are both enabled for fatal reporting
    // and in the handled set (CLKCTL_ILLEGAL_REQUEST).
    report.raw_pending = NVSWITCH_ENG_RD32(device, NVLIPT, , instance, _NVLIPT_COMMON, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_ENG_RD32(device, NVLIPT, , instance, _NVLIPT_COMMON, _ERR_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable & (DRF_NUM(_NVLIPT_COMMON, _ERR_STATUS_0, _CLKCTL_ILLEGAL_REQUEST, 1));

    pending = report.raw_pending & report.mask;
    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_ENG_RD32(device, NVLIPT, , instance, _NVLIPT_COMMON, _ERR_FIRST_0);
    // NOTE(review): "contain" is presumably consumed by the
    // NVSWITCH_REPORT_CONTAIN macro below -- confirm against its definition.
    contain = NVSWITCH_ENG_RD32(device, NVLIPT, , instance, _NVLIPT_COMMON, _ERR_CONTAIN_EN_0);

    bit = DRF_NUM(_NVLIPT_COMMON, _ERR_STATUS_0, _CLKCTL_ILLEGAL_REQUEST, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // The error is instance-wide, so report it against every valid link
        // served by this NVLIPT instance.
        for (local_link_idx = 0; local_link_idx < NVSWITCH_LINKS_PER_NVLIPT_LS10; local_link_idx++)
        {
            link = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + local_link_idx;
            if (nvswitch_is_link_valid(device, link))
            {
                NVSWITCH_REPORT_CONTAIN(_HW_NVLIPT_CLKCTL_ILLEGAL_REQUEST, "CLKCTL_ILLEGAL_REQUEST", NV_FALSE);
            }
        }

        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // A single write covers the whole instance, so stop at the first link
    // that has recorded a fatal error.
    for (local_link_idx = 0; local_link_idx < NVSWITCH_LINKS_PER_NVLIPT_LS10; local_link_idx++)
    {
        link = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + local_link_idx;
        if (nvswitch_is_link_valid(device, link) &&
            (device->link[link].fatal_error_occurred))
        {
            NVSWITCH_ENG_WR32(device, NVLIPT, , instance, _NVLIPT_COMMON, _ERR_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
            break;
        }
    }

    // Clear the handled interrupts (and the FIRST latch, if ours is in it).
    if (report.raw_first & report.mask)
    {
        NVSWITCH_ENG_WR32(device, NVLIPT, , instance, _NVLIPT_COMMON, _ERR_FIRST_0,
            report.raw_first & report.mask);
    }
    NVSWITCH_ENG_WR32(device, NVLIPT, , instance, _NVLIPT_COMMON, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLIPT_COMMON FATAL interrupts, pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
5028 
5029 static NvlStatus
5030 _nvswitch_service_nxbar_tile_ls10
5031 (
5032     nvswitch_device *device,
5033     NvU32 tile
5034 )
5035 {
5036     ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
5037     NvU32 pending, bit, unhandled;
5038     NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
5039     NvU32 link = tile;
5040 
5041     report.raw_pending = NVSWITCH_TILE_RD32(device, tile, _NXBAR_TILE, _ERR_STATUS);
5042     report.raw_enable = NVSWITCH_TILE_RD32(device, tile, _NXBAR_TILE, _ERR_FATAL_INTR_EN);
5043     report.mask = chip_device->intr_mask.tile.fatal;
5044     pending = report.raw_pending & report.mask;
5045 
5046    if (pending == 0)
5047     {
5048         return -NVL_NOT_FOUND;
5049     }
5050 
5051     unhandled = pending;
5052     report.raw_first = NVSWITCH_TILE_RD32(device, tile, _NXBAR_TILE, _ERR_FIRST);
5053 
5054     bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_BUFFER_OVERFLOW, 1);
5055     if (nvswitch_test_flags(pending, bit))
5056     {
5057         NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_BUFFER_OVERFLOW, "ingress SRC-VC buffer overflow", NV_TRUE);
5058         nvswitch_clear_flags(&unhandled, bit);
5059     }
5060 
5061     bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_BUFFER_UNDERFLOW, 1);
5062     if (nvswitch_test_flags(pending, bit))
5063     {
5064         NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_BUFFER_UNDERFLOW, "ingress SRC-VC buffer underflow", NV_TRUE);
5065         nvswitch_clear_flags(&unhandled, bit);
5066     }
5067 
5068     bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _EGRESS_CREDIT_OVERFLOW, 1);
5069     if (nvswitch_test_flags(pending, bit))
5070     {
5071         NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_EGRESS_CREDIT_OVERFLOW, "egress DST-VC credit overflow", NV_TRUE);
5072         nvswitch_clear_flags(&unhandled, bit);
5073     }
5074 
5075     bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _EGRESS_CREDIT_UNDERFLOW, 1);
5076     if (nvswitch_test_flags(pending, bit))
5077     {
5078         NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_EGRESS_CREDIT_UNDERFLOW, "egress DST-VC credit underflow", NV_TRUE);
5079         nvswitch_clear_flags(&unhandled, bit);
5080     }
5081 
5082     bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_NON_BURSTY_PKT, 1);
5083     if (nvswitch_test_flags(pending, bit))
5084     {
5085         NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_NON_BURSTY_PKT, "ingress packet burst error", NV_TRUE);
5086         nvswitch_clear_flags(&unhandled, bit);
5087     }
5088 
5089     bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_NON_STICKY_PKT, 1);
5090     if (nvswitch_test_flags(pending, bit))
5091     {
5092         NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_NON_STICKY_PKT, "ingress packet sticky error", NV_TRUE);
5093         nvswitch_clear_flags(&unhandled, bit);
5094     }
5095 
5096     bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_BURST_GT_9_DATA_VC, 1);
5097     if (nvswitch_test_flags(pending, bit))
5098     {
5099         NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_BURST_GT_9_DATA_VC, "possible bubbles at ingress", NV_TRUE);
5100         nvswitch_clear_flags(&unhandled, bit);
5101     }
5102 
5103     bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_PKT_INVALID_DST, 1);
5104     if (nvswitch_test_flags(pending, bit))
5105     {
5106         NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_PKT_INVALID_DST, "ingress packet invalid dst error", NV_TRUE);
5107         nvswitch_clear_flags(&unhandled, bit);
5108     }
5109 
5110     bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_PKT_PARITY_ERROR, 1);
5111     if (nvswitch_test_flags(pending, bit))
5112     {
5113         NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_PKT_PARITY_ERROR, "ingress packet parity error", NV_TRUE);
5114         nvswitch_clear_flags(&unhandled, bit);
5115     }
5116 
5117     if (report.raw_first & report.mask)
5118     {
5119         NVSWITCH_TILE_WR32(device, tile, _NXBAR_TILE, _ERR_FIRST,
5120             report.raw_first & report.mask);
5121     }
5122 
5123     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
5124 
5125     // Disable interrupts that have occurred after fatal error.
5126     // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
5127     NVSWITCH_TILE_WR32(device, tile, _NXBAR_TILE, _ERR_FATAL_INTR_EN,
5128                             report.raw_enable ^ pending);
5129 
5130     NVSWITCH_TILE_WR32(device, link, _NXBAR_TILE, _ERR_STATUS, pending);
5131 
5132     if (unhandled != 0)
5133     {
5134         return -NVL_MORE_PROCESSING_REQUIRED;
5135     }
5136 
5137     return NVL_SUCCESS;
5138 }
5139 
//
// Service fatal interrupts for one NXBAR TILEOUT.
//
// Returns -NVL_NOT_FOUND if nothing enabled is pending,
// -NVL_MORE_PROCESSING_REQUIRED if an unhandled bit remains, else NVL_SUCCESS.
//
static NvlStatus
_nvswitch_service_nxbar_tileout_ls10
(
    nvswitch_device *device,
    NvU32 tileout
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NvU32 pending, bit, unhandled;
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    // "link" aliases the tileout index; the NVSWITCH_REPORT_FATAL macro
    // references a local named "link" when logging.
    NvU32 link = tileout;

    report.raw_pending = NVSWITCH_TILEOUT_RD32(device, tileout, _NXBAR_TILEOUT, _ERR_STATUS);
    report.raw_enable = NVSWITCH_TILEOUT_RD32(device, tileout, _NXBAR_TILEOUT, _ERR_FATAL_INTR_EN);
    report.mask = chip_device->intr_mask.tileout.fatal;
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_TILEOUT_RD32(device, tileout, _NXBAR_TILEOUT, _ERR_FIRST);

    bit = DRF_NUM(_NXBAR_TILEOUT, _ERR_STATUS, _INGRESS_BUFFER_OVERFLOW, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_INGRESS_BUFFER_OVERFLOW, "ingress SRC-VC buffer overflow", NV_TRUE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NXBAR_TILEOUT, _ERR_STATUS, _INGRESS_BUFFER_UNDERFLOW, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_INGRESS_BUFFER_UNDERFLOW, "ingress SRC-VC buffer underflow", NV_TRUE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NXBAR_TILEOUT, _ERR_STATUS, _EGRESS_CREDIT_OVERFLOW, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_EGRESS_CREDIT_OVERFLOW, "egress DST-VC credit overflow", NV_TRUE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NXBAR_TILEOUT, _ERR_STATUS, _EGRESS_CREDIT_UNDERFLOW, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_EGRESS_CREDIT_UNDERFLOW, "egress DST-VC credit underflow", NV_TRUE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NXBAR_TILEOUT, _ERR_STATUS, _INGRESS_NON_BURSTY_PKT, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_INGRESS_NON_BURSTY_PKT, "ingress packet burst error", NV_TRUE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NXBAR_TILEOUT, _ERR_STATUS, _INGRESS_NON_STICKY_PKT, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_INGRESS_NON_STICKY_PKT, "ingress packet sticky error", NV_TRUE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NXBAR_TILEOUT, _ERR_STATUS, _INGRESS_BURST_GT_9_DATA_VC, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_INGRESS_BURST_GT_9_DATA_VC, "possible bubbles at ingress", NV_TRUE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NXBAR_TILEOUT, _ERR_STATUS, _EGRESS_CDT_PARITY_ERROR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // NOTE(review): message says "ingress" for an _EGRESS_CDT_ parity
        // bit -- confirm the intended wording before changing the string.
        NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_EGRESS_CDT_PARITY_ERROR, "ingress credit parity error", NV_TRUE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    // Clear the FIRST latch if one of our handled bits is recorded in it.
    if (report.raw_first & report.mask)
    {
        NVSWITCH_TILEOUT_WR32(device, tileout, _NXBAR_TILEOUT, _ERR_FIRST,
            report.raw_first & report.mask);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
    NVSWITCH_TILEOUT_WR32(device, tileout, _NXBAR_TILEOUT, _ERR_FATAL_INTR_EN,
                            report.raw_enable ^ pending);

    NVSWITCH_TILEOUT_WR32(device, tileout, _NXBAR_TILEOUT, _ERR_STATUS, pending);

    if (unhandled != 0)
    {
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
5243 
//
// Top-level fatal service for one NXBAR: fan out to the per-TILE and
// per-TILEOUT handlers for every bit set in the TCP error status.
//
// Returns -NVL_NOT_FOUND if nothing is pending,
// -NVL_MORE_PROCESSING_REQUIRED if any bit was not serviced, else NVL_SUCCESS.
//
static NvlStatus
_nvswitch_service_nxbar_fatal_ls10
(
    nvswitch_device *device,
    NvU32 nxbar
)
{
    NvU32 pending, bit, unhandled;
    NvU32 tile_idx;
    NvU32 tile, tileout;

    pending = NVSWITCH_ENG_RD32(device, NXBAR, , nxbar, _NXBAR, _TCP_ERROR_STATUS);
    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;

    // _TILE0 is the first of NUM_NXBAR_TILES_PER_TC_LS10 consecutive bits.
    for (tile = 0; tile < NUM_NXBAR_TILES_PER_TC_LS10; tile++)
    {
        bit = DRF_NUM(_NXBAR, _TCP_ERROR_STATUS, _TILE0, 1) << tile;
        if (nvswitch_test_flags(pending, bit))
        {
            tile_idx = TILE_INDEX_LS10(device, nxbar, tile);
            if (NVSWITCH_ENG_VALID_LS10(device, TILE, tile_idx))
            {
                if (_nvswitch_service_nxbar_tile_ls10(device, tile_idx) == NVL_SUCCESS)
                {
                    nvswitch_clear_flags(&unhandled, bit);
                }
            }
        }
    }

    // _TILEOUT0 is likewise the first of NUM_NXBAR_TILEOUTS_PER_TC_LS10 bits.
    for (tileout = 0; tileout < NUM_NXBAR_TILEOUTS_PER_TC_LS10; tileout++)
    {
        bit = DRF_NUM(_NXBAR, _TCP_ERROR_STATUS, _TILEOUT0, 1) << tileout;
        if (nvswitch_test_flags(pending, bit))
        {
            // NOTE(review): TILE_INDEX_LS10 is used for the TILEOUT index as
            // well -- presumably the tile/tileout index math is identical;
            // confirm against the macro definition.
            tile_idx = TILE_INDEX_LS10(device, nxbar, tileout);
            if (NVSWITCH_ENG_VALID_LS10(device, TILEOUT, tile_idx))
            {
                if (_nvswitch_service_nxbar_tileout_ls10(device, tile_idx) == NVL_SUCCESS)
                {
                    nvswitch_clear_flags(&unhandled, bit);
                }
            }
        }
    }

    // TODO: Perform hot_reset to recover NXBAR

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);


    if (unhandled != 0)
    {
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
5307 
//
// Emit the deferred fatal NVLDL error reports (currently only LTSSM fault-up)
// recorded for this link in chip_device->deferredLinkErrors.
//
static void
_nvswitch_emit_link_errors_nvldl_fatal_link_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit;

    // Only enabled link errors are deferred; the report fields are filled
    // from the saved mask for the NVSWITCH_REPORT_FATAL macro's use.
    pending = chip_device->deferredLinkErrors[link].fatalIntrMask.dl;
    report.raw_pending = pending;
    report.raw_enable = pending;
    report.mask = report.raw_enable;

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_FAULT_UP, "LTSSM Fault Up", NV_FALSE);
    }
}
5332 
//
// Emit the deferred nonfatal NVLDL error reports (currently only the RX
// short error rate) recorded for this link, and disable the error-rate
// threshold interrupt so it does not re-fire.
//
static void
_nvswitch_emit_link_errors_nvldl_nonfatal_link_ls10
(
    nvswitch_device *device,
    NvU32 link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit;

    // Only enabled link errors are deferred; the report fields are filled
    // from the saved mask for the NVSWITCH_REPORT_NONFATAL macro's use.
    pending = chip_device->deferredLinkErrors[link].nonFatalIntrMask.dl;
    report.raw_pending = pending;
    report.raw_enable = pending;
    report.mask = report.raw_enable;

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_SHORT_ERROR_RATE, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // Disable further interrupts
        nvlink_link *nvlink = nvswitch_get_link(device, link);
        nvlink->errorThreshold.bInterruptTrigerred = NV_TRUE;
        nvswitch_configure_error_rate_threshold_interrupt_ls10(nvlink, NV_FALSE);
        NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_SHORT_ERROR_RATE, "RX Short Error Rate");
    }
}
5360 
//
// Emit the deferred nonfatal NVLTLC RX link (_1 bank) error reports recorded
// for this link (currently only the AN1 heartbeat timeout).
//
static void
_nvswitch_emit_link_errors_nvltlc_rx_lnk_nonfatal_1_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, injected;

    // Only enabled link errors are deferred; "injected" records which of
    // them were raised via the error-injection register rather than by HW.
    pending = chip_device->deferredLinkErrors[link].nonFatalIntrMask.tlcRx1;
    injected = chip_device->deferredLinkErrors[link].nonFatalIntrMask.tlcRx1Injected;
    report.raw_pending = pending;
    report.raw_enable = pending;
    report.mask = report.raw_enable;


    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_1, _HEARTBEAT_TIMEOUT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_RX_LNK_AN1_HEARTBEAT_TIMEOUT_ERR, "AN1 Heartbeat Timeout Error");

        // NOTE(review): this branch (genuine, non-injected timeout) is
        // intentionally empty here -- presumably a placeholder for extra
        // handling; confirm before removing.
        if (FLD_TEST_DRF_NUM(_NVLTLC_RX_LNK, _ERR_REPORT_INJECT_1, _HEARTBEAT_TIMEOUT_ERR, 0x0, injected))
        {
        }
    }
}
5391 
//
// Emit the deferred nonfatal NVLIPT link error reports recorded for this
// link (currently only FAILEDMINIONREQUEST).
//
static void
_nvswitch_emit_link_errors_nvlipt_lnk_nonfatal_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit;

    // Only enabled link errors are deferred; the report fields are filled
    // from the saved mask for the NVSWITCH_REPORT_NONFATAL macro's use.
    pending = chip_device->deferredLinkErrors[link].nonFatalIntrMask.liptLnk;
    report.raw_pending = pending;
    report.raw_enable = pending;
    report.mask = report.raw_enable;

    bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _FAILEDMINIONREQUEST, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_FAILEDMINIONREQUEST, "_FAILEDMINIONREQUEST");

    }
}
5417 
//
// Emit every class of deferred link error (NVLDL fatal, NVLDL nonfatal,
// NVLTLC RX _1 nonfatal, NVLIPT link nonfatal) recorded for this link.
// Callers clear the deferred state afterwards via
// _nvswitch_clear_deferred_link_errors_ls10().
//
static void
_nvswitch_emit_deferred_link_errors_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    _nvswitch_emit_link_errors_nvldl_fatal_link_ls10(device, nvlipt_instance, link);
    _nvswitch_emit_link_errors_nvldl_nonfatal_link_ls10(device, link);
    _nvswitch_emit_link_errors_nvltlc_rx_lnk_nonfatal_1_ls10(device, nvlipt_instance, link);
    _nvswitch_emit_link_errors_nvlipt_lnk_nonfatal_ls10(device, nvlipt_instance, link);
}
5431 
5432 static void
5433 _nvswitch_clear_deferred_link_errors_ls10
5434 (
5435     nvswitch_device *device,
5436     NvU32 link
5437 )
5438 {
5439     ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
5440     NVLINK_LINK_ERROR_REPORTING *pLinkErrors;
5441 
5442     pLinkErrors = &chip_device->deferredLinkErrors[link];
5443 
5444     nvswitch_os_memset(pLinkErrors, 0, sizeof(NVLINK_LINK_ERROR_REPORTING));
5445 }
5446 
5447 static void
5448 _nvswitch_deferred_link_state_check_ls10
5449 (
5450     nvswitch_device *device,
5451     void *fn_args
5452 )
5453 {
5454     NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS *pErrorReportParams =
5455                                            (NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS*)fn_args;
5456     NvU32 nvlipt_instance = pErrorReportParams->nvlipt_instance;
5457     NvU32 link = pErrorReportParams->link;
5458     ls10_device *chip_device;
5459     nvlink_link *pLink;
5460     NvU64 linkState;
5461 
5462     chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
5463     pLink = nvswitch_get_link(device, pErrorReportParams->link);
5464 
5465     if ((pLink == NULL) ||
5466         (device->hal.nvswitch_corelib_get_dl_link_mode(pLink, &linkState) != NVL_SUCCESS) ||
5467          ((linkState != NVLINK_LINKSTATE_HS) && (linkState != NVLINK_LINKSTATE_SLEEP)))
5468     {
5469         _nvswitch_emit_deferred_link_errors_ls10(device, nvlipt_instance, link);
5470     }
5471 
5472     _nvswitch_clear_deferred_link_errors_ls10(device, link);
5473     nvswitch_os_free(pErrorReportParams);
5474     chip_device->deferredLinkErrors[link].bLinkStateCallBackEnabled = NV_FALSE;
5475 }
5476 
5477 static void
5478 _nvswitch_create_deferred_link_state_check_task_ls10
5479 (
5480     nvswitch_device *device,
5481     NvU32 nvlipt_instance,
5482     NvU32 link
5483 )
5484 {
5485     ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
5486     NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS *pErrorReportParams;
5487     NvlStatus status;
5488 
5489     if (chip_device->deferredLinkErrors[link].bLinkStateCallBackEnabled)
5490     {
5491         return;
5492     }
5493 
5494     status = NVL_ERR_GENERIC;
5495     pErrorReportParams = nvswitch_os_malloc(sizeof(NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS));
5496     if(pErrorReportParams != NULL)
5497     {
5498         pErrorReportParams->nvlipt_instance = nvlipt_instance;
5499         pErrorReportParams->link = link;
5500 
5501         status = nvswitch_task_create_args(device, (void*)pErrorReportParams,
5502                                            &_nvswitch_deferred_link_state_check_ls10,
5503                                            NVSWITCH_DEFERRED_LINK_STATE_CHECK_INTERVAL_NS,
5504                                            NVSWITCH_TASK_TYPE_FLAGS_RUN_ONCE |
5505                                            NVSWITCH_TASK_TYPE_FLAGS_VOID_PTR_ARGS);
5506     }
5507 
5508     if (status == NVL_SUCCESS)
5509     {
5510         chip_device->deferredLinkErrors[link].bLinkStateCallBackEnabled = NV_TRUE;
5511     }
5512     else
5513     {
5514         NVSWITCH_PRINT(device, ERROR,
5515                         "%s: Failed to allocate memory. Cannot defer link state check.\n",
5516                         __FUNCTION__);
5517         _nvswitch_emit_deferred_link_errors_ls10(device, nvlipt_instance, link);
5518         _nvswitch_clear_deferred_link_errors_ls10(device, link);
5519         nvswitch_os_free(pErrorReportParams);
5520     }
5521 }
5522 
5523 static void
5524 _nvswitch_deferred_link_errors_check_ls10
5525 (
5526     nvswitch_device *device,
5527     void *fn_args
5528 )
5529 {
5530     NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS *pErrorReportParams =
5531                                            (NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS*)fn_args;
5532     NvU32 nvlipt_instance = pErrorReportParams->nvlipt_instance;
5533     NvU32 link = pErrorReportParams->link;
5534     ls10_device *chip_device;
5535     NvU32 pending, bit;
5536 
5537     chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
5538 
5539     pending = chip_device->deferredLinkErrors[link].fatalIntrMask.dl;
5540     bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1);
5541     if (nvswitch_test_flags(pending, bit))
5542     {
5543         _nvswitch_create_deferred_link_state_check_task_ls10(device, nvlipt_instance, link);
5544     }
5545     else
5546     {
5547         _nvswitch_emit_deferred_link_errors_ls10(device, nvlipt_instance, link);
5548         _nvswitch_clear_deferred_link_errors_ls10(device, link);
5549     }
5550 
5551     nvswitch_os_free(pErrorReportParams);
5552     chip_device->deferredLinkErrors[link].bLinkErrorsCallBackEnabled = NV_FALSE;
5553 }
5554 
5555 static void
5556 _nvswitch_create_deferred_link_errors_task_ls10
5557 (
5558     nvswitch_device *device,
5559     NvU32 nvlipt_instance,
5560     NvU32 link
5561 )
5562 {
5563     ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
5564     NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS *pErrorReportParams;
5565     NvlStatus status;
5566 
5567     if (chip_device->deferredLinkErrors[link].bLinkErrorsCallBackEnabled)
5568     {
5569         return;
5570     }
5571 
5572     status = NVL_ERR_GENERIC;
5573     pErrorReportParams = nvswitch_os_malloc(sizeof(NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS));
5574     if(pErrorReportParams != NULL)
5575     {
5576         pErrorReportParams->nvlipt_instance = nvlipt_instance;
5577         pErrorReportParams->link = link;
5578 
5579         status = nvswitch_task_create_args(device, (void*)pErrorReportParams,
5580                                            &_nvswitch_deferred_link_errors_check_ls10,
5581                                            NVSWITCH_DEFERRED_FAULT_UP_CHECK_INTERVAL_NS,
5582                                            NVSWITCH_TASK_TYPE_FLAGS_RUN_ONCE |
5583                                            NVSWITCH_TASK_TYPE_FLAGS_VOID_PTR_ARGS);
5584     }
5585 
5586     if (status == NVL_SUCCESS)
5587     {
5588         chip_device->deferredLinkErrors[link].bLinkErrorsCallBackEnabled = NV_TRUE;
5589     }
5590     else
5591     {
5592         NVSWITCH_PRINT(device, ERROR,
5593                         "%s: Failed to create task. Cannot defer link error check.\n",
5594                         __FUNCTION__);
5595         _nvswitch_emit_deferred_link_errors_ls10(device, nvlipt_instance, link);
5596         _nvswitch_clear_deferred_link_errors_ls10(device, link);
5597         nvswitch_os_free(pErrorReportParams);
5598     }
5599 }
5600 
//
// Service nonfatal NVLDL interrupts for one link.  The RX short error rate
// is deferred (recorded and handled by the deferred-error task) rather than
// reported inline.
//
// Returns -NVL_NOT_FOUND if nothing enabled is pending,
// -NVL_MORE_PROCESSING_REQUIRED if an unhandled bit remains, else NVL_SUCCESS.
//
static NvlStatus
_nvswitch_service_nvldl_nonfatal_link_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NvU32 pending, bit, unhandled;
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };

    report.raw_pending = NVSWITCH_LINK_RD32(device, link, NVLDL, _NVLDL_TOP, _INTR);
    report.raw_enable = NVSWITCH_LINK_RD32(device, link, NVLDL, _NVLDL_TOP, _INTR_NONSTALL_EN);
    report.mask = report.raw_enable;
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _TX_REPLAY, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_DLPL_TX_REPLAY, "TX Replay Error");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _TX_RECOVERY_SHORT, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_DLPL_TX_RECOVERY_SHORT, "TX Recovery Short");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_SHORT_ERROR_RATE, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // Defer: record the bit and let the deferred-error task decide
        // whether/when to report it.
        chip_device->deferredLinkErrors[link].nonFatalIntrMask.dl |= bit;
        _nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_LONG_ERROR_RATE, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_LONG_ERROR_RATE, "RX Long Error Rate");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_ILA_TRIGGER, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_ILA_TRIGGER, "RX ILA Trigger");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_CRC_COUNTER, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_CRC_COUNTER, "RX CRC Counter");
        nvswitch_clear_flags(&unhandled, bit);

        //
        // Mask CRC counter after first occurrance - otherwise, this interrupt
        // will continue to fire once the CRC counter has hit the threshold
        // See Bug 3341528
        //
        report.raw_enable = report.raw_enable & (~bit);
        NVSWITCH_LINK_WR32(device, link, NVLDL, _NVLDL_TOP, _INTR_NONSTALL_EN,
            report.raw_enable);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Clear everything that was pending (handled or not).
    NVSWITCH_LINK_WR32(device, link, NVLDL, _NVLDL_TOP, _INTR, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLDL nonfatal interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, link, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
5691 
5692 static NvlStatus
5693 _nvswitch_service_nvldl_nonfatal_ls10
5694 (
5695     nvswitch_device *device,
5696     NvU32 nvlipt_instance
5697 )
5698 {
5699     NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask;
5700     NvU32 i;
5701     nvlink_link *link;
5702     NvlStatus status;
5703     NvlStatus return_status = -NVL_NOT_FOUND;
5704 
5705     enabledLinkMask = nvswitch_get_enabled_link_mask(device);
5706     localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance);
5707     localEnabledLinkMask = enabledLinkMask & localLinkMask;
5708 
5709     FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask)
5710     {
5711         link = nvswitch_get_link(device, i);
5712         if (link == NULL)
5713         {
5714             // An interrupt on an invalid link should never occur
5715             NVSWITCH_ASSERT(link != NULL);
5716             continue;
5717         }
5718 
5719         if (NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT) != nvlipt_instance)
5720         {
5721             NVSWITCH_ASSERT(0);
5722             break;
5723         }
5724 
5725         if (nvswitch_is_link_in_reset(device, link))
5726         {
5727             continue;
5728         }
5729 
5730         status = _nvswitch_service_nvldl_nonfatal_link_ls10(device, nvlipt_instance, i);
5731         if (status != NVL_SUCCESS)
5732         {
5733             return_status = status;
5734         }
5735     }
5736     FOR_EACH_INDEX_IN_MASK_END;
5737 
5738     return return_status;
5739 }
5740 
//
// Service nonfatal NVLTLC RX link (_0 bank) interrupts for one link
// (currently only RXRSPSTATUS_PRIV_ERR is handled).
//
// Returns -NVL_NOT_FOUND if nothing enabled is pending,
// -NVL_MORE_PROCESSING_REQUIRED if an unhandled bit remains, else NVL_SUCCESS.
//
static NvlStatus
_nvswitch_service_nvltlc_rx_lnk_nonfatal_0_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    NvU32 pending, bit, unhandled;
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };

    report.raw_pending = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_NON_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable;

    pending = report.raw_pending & report.mask;
    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_0);

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXRSPSTATUS_PRIV_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_RX_LNK_RXRSPSTATUS_PRIV_ERR, "RX Rsp Status PRIV Error");
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Clear the FIRST latch (if ours) and the handled status bits.
    if (report.raw_first & report.mask)
    {
        NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_0,
                report.raw_first & report.mask);
    }
    NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLTLC_RX_LNK _0 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, link, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
5791 
//
// Service NVLTLC TX link _ERR_STATUS_0 nonfatal interrupts for one link.
//
// Reads the pending/enabled state, reports each recognized RAM ECC DBE /
// ECC-limit error, disables reporting of the serviced bits if a fatal error
// has already occurred on the link, then clears _ERR_FIRST_0 and
// _ERR_STATUS_0.
//
// Returns:
//   -NVL_NOT_FOUND                if no enabled interrupt is pending
//   -NVL_MORE_PROCESSING_REQUIRED if an unrecognized bit was pending
//   NVL_SUCCESS                   otherwise
//
static NvlStatus
_nvswitch_service_nvltlc_tx_lnk_nonfatal_0_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    NvU32 pending, bit, unhandled;
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };

    report.raw_pending = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable;
    // Only service bits that are both pending and enabled for nonfatal reporting
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_0);

    // One report-and-clear block per recognized error bit follows.
    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _CREQ_RAM_DAT_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_CREQ_RAM_DAT_ECC_DBE_ERR, "CREQ RAM DAT ECC DBE Error");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _CREQ_RAM_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_CREQ_RAM_ECC_LIMIT_ERR, "CREQ RAM DAT ECC Limit Error");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP_RAM_DAT_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_RSP_RAM_DAT_ECC_DBE_ERR, "Response RAM DAT ECC DBE Error");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP_RAM_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_RSP_RAM_ECC_LIMIT_ERR, "Response RAM ECC Limit Error");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _COM_RAM_DAT_ECC_DBE_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_COM_RAM_DAT_ECC_DBE_ERR, "COM RAM DAT ECC DBE Error");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _COM_RAM_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_COM_RAM_ECC_LIMIT_ERR, "COM RAM ECC Limit Error");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP1_RAM_ECC_LIMIT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_RSP1_RAM_ECC_LIMIT_ERR, "RSP1 RAM ECC Limit Error");
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    if (device->link[link].fatal_error_occurred)
    {
        // pending is a subset of raw_enable here, so XOR clears exactly
        // the serviced enable bits.
        NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0,
            report.raw_enable ^ pending);
    }

    // Clear the "first error" latch for the serviced bits, then the status bits
    if (report.raw_first & report.mask)
    {
        NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_0,
                report.raw_first & report.mask);
    }
    NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLTLC_TX_LNK _0 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, link, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
5891 
//
// Service NVLTLC RX link _ERR_STATUS_1 nonfatal interrupts for one link.
//
// Handles the heartbeat-timeout (HBTO) error by deferring it into the
// per-link deferred-error state rather than reporting immediately, and
// applies the WAR below to mask HBTO when it was not injected.
//
// Returns:
//   -NVL_NOT_FOUND                if no enabled interrupt is pending
//   -NVL_MORE_PROCESSING_REQUIRED if an unrecognized bit was pending
//   NVL_SUCCESS                   otherwise
//
static NvlStatus
_nvswitch_service_nvltlc_rx_lnk_nonfatal_1_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NvU32 pending, bit, unhandled, injected;
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };

    report.raw_pending = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_1);
    report.raw_enable = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_NON_FATAL_REPORT_EN_1);
    report.mask = report.raw_enable;
    // Only service bits that are both pending and enabled for nonfatal reporting
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_1);
    // Snapshot of the error-injection register; used to tell real HBTO from
    // injected HBTO below.
    injected = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_REPORT_INJECT_1);

    bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_1, _HEARTBEAT_TIMEOUT_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // Record the HBTO for deferred handling instead of reporting now.
        chip_device->deferredLinkErrors[link].nonFatalIntrMask.tlcRx1 |= bit;
        chip_device->deferredLinkErrors[link].nonFatalIntrMask.tlcRx1Injected |= injected;
        _nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link);

        // Only mask HBTO when it was NOT injected by the error-inject register.
        if (FLD_TEST_DRF_NUM(_NVLTLC_RX_LNK, _ERR_REPORT_INJECT_1, _HEARTBEAT_TIMEOUT_ERR, 0x0, injected))
        {
            //
            // WAR Bug 200627368: Mask off HBTO to avoid a storm
            // During the start of reset_and_drain, all links on the GPU
            // will go into contain, causing HBTO on other switch links connected
            // to that GPU. For the switch side, these interrupts are not fatal,
            // but until we get to reset_and_drain for this link, HBTO will continue
            // to fire repeatedly. After reset_and_drain, HBTO will be re-enabled
            // by MINION after links are trained.
            //
            report.raw_enable = report.raw_enable & (~bit);
            NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_NON_FATAL_REPORT_EN_1,
                report.raw_enable);
        }
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_NON_FATAL_REPORT_EN_1,
            report.raw_enable & (~pending));
    }

    // Clear the "first error" latch for the serviced bits, then the status bits
    if (report.raw_first & report.mask)
    {
        NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_1,
                report.raw_first & report.mask);
    }
    NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_1, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLTLC_RX_LNK _1 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, link, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
5969 
//
// Service NVLTLC TX link _ERR_STATUS_1 nonfatal interrupts for one link.
//
// Reports an AN1 timeout for each of the eight virtual channels (VC0-VC7),
// disables reporting of the serviced bits if a fatal error has already
// occurred on the link, then clears _ERR_FIRST_1 and _ERR_STATUS_1.
//
// Returns:
//   -NVL_NOT_FOUND                if no enabled interrupt is pending
//   -NVL_MORE_PROCESSING_REQUIRED if an unrecognized bit was pending
//   NVL_SUCCESS                   otherwise
//
static NvlStatus
_nvswitch_service_nvltlc_tx_lnk_nonfatal_1_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    NvU32 pending, bit, unhandled;
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };

    report.raw_pending = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_1);
    report.raw_enable = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_1);
    report.mask = report.raw_enable;
    // Only service bits that are both pending and enabled for nonfatal reporting
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_LINK_RD32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_1);

    // One report-and-clear block per VC timeout bit follows.
    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _TIMEOUT_VC0, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC0, "AN1 Timeout VC0");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _TIMEOUT_VC1, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC1, "AN1 Timeout VC1");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _TIMEOUT_VC2, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC2, "AN1 Timeout VC2");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _TIMEOUT_VC3, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC3, "AN1 Timeout VC3");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _TIMEOUT_VC4, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC4, "AN1 Timeout VC4");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _TIMEOUT_VC5, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC5, "AN1 Timeout VC5");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _TIMEOUT_VC6, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC6, "AN1 Timeout VC6");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _TIMEOUT_VC7, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC7, "AN1 Timeout VC7");
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    if (device->link[link].fatal_error_occurred)
    {
        // pending is a subset of raw_enable here, so XOR clears exactly
        // the serviced enable bits.
        NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_1,
                report.raw_enable ^ pending);
    }

    // Clear the "first error" latch for the serviced bits, then the status bits
    if (report.raw_first & report.mask)
    {
        NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_1,
                report.raw_first & report.mask);
    }
    NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_1, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLTLC_TX_LNK _1 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, link, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
6076 
6077 static NvlStatus
6078 _nvswitch_service_nvltlc_nonfatal_ls10
6079 (
6080     nvswitch_device *device,
6081     NvU32 nvlipt_instance
6082 )
6083 {
6084     NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask;
6085     NvU32 i;
6086     nvlink_link *link;
6087     NvlStatus status;
6088     NvlStatus return_status = NVL_SUCCESS;
6089 
6090     enabledLinkMask = nvswitch_get_enabled_link_mask(device);
6091     localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance);
6092     localEnabledLinkMask = enabledLinkMask & localLinkMask;
6093 
6094     FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask)
6095     {
6096         link = nvswitch_get_link(device, i);
6097         if (link == NULL)
6098         {
6099             // An interrupt on an invalid link should never occur
6100             NVSWITCH_ASSERT(link != NULL);
6101             continue;
6102         }
6103 
6104         if (NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT) != nvlipt_instance)
6105         {
6106             NVSWITCH_ASSERT(0);
6107             break;
6108         }
6109 
6110         //
6111         // If link is in reset or NCISOC clock is off then
6112         // don't need to check the link for NVLTLC errors
6113         // as the IP's registers are off
6114         //
6115         if (nvswitch_is_link_in_reset(device, link) ||
6116             _nvswitch_is_ncisoc_clock_off_ls10(device, link))
6117         {
6118             continue;
6119         }
6120 
6121         status = _nvswitch_service_nvltlc_rx_lnk_nonfatal_0_ls10(device, nvlipt_instance, i);
6122         if (status != NVL_SUCCESS)
6123         {
6124             return_status = status;
6125         }
6126 
6127         status = _nvswitch_service_nvltlc_tx_lnk_nonfatal_0_ls10(device, nvlipt_instance, i);
6128         if (status != NVL_SUCCESS)
6129         {
6130             return_status = status;
6131         }
6132 
6133         status = _nvswitch_service_nvltlc_rx_lnk_nonfatal_1_ls10(device, nvlipt_instance, i);
6134         if (status != NVL_SUCCESS)
6135         {
6136             return_status = status;
6137         }
6138 
6139         status = _nvswitch_service_nvltlc_tx_lnk_nonfatal_1_ls10(device, nvlipt_instance, i);
6140         if (status != NVL_SUCCESS)
6141         {
6142             return_status = status;
6143         }
6144     }
6145     FOR_EACH_INDEX_IN_MASK_END;
6146 
6147     return return_status;
6148 }
6149 
6150 static NvlStatus
6151 _nvswitch_service_nvlipt_lnk_status_ls10
6152 (
6153     nvswitch_device *device,
6154     NvU32 nvlipt_instance,
6155     NvU32 link_id
6156 )
6157 {
6158     NvU32 pending, enabled, unhandled, bit;
6159     NvU64 mode;
6160     nvlink_link *link;
6161     link = nvswitch_get_link(device, link_id);
6162 
6163     pending =  NVSWITCH_LINK_RD32(device, link_id, NVLIPT_LNK, _NVLIPT_LNK, _INTR_STATUS);
6164     enabled =  NVSWITCH_LINK_RD32(device, link_id, NVLIPT_LNK, _NVLIPT_LNK, _INTR_INT1_EN);
6165     pending &= enabled;
6166     unhandled = pending;
6167 
6168     bit = DRF_NUM(_NVLIPT_LNK, _INTR_STATUS, _LINKSTATEREQUESTREADYSET, 1);
6169     if (nvswitch_test_flags(pending, bit))
6170     {
6171         nvswitch_clear_flags(&unhandled, bit);
6172         if(nvswitch_corelib_get_dl_link_mode_ls10(link, &mode) != NVL_SUCCESS)
6173         {
6174             NVSWITCH_PRINT(device, ERROR, "%s: nvlipt_lnk_status: Failed to check link mode! LinkId %d\n",
6175                         __FUNCTION__, link_id);
6176         }
6177         else if(mode == NVLINK_LINKSTATE_HS)
6178         {
6179             NVSWITCH_PRINT(device, INFO, "%s: nvlipt_lnk_status: Link is up!. LinkId %d\n",
6180                         __FUNCTION__, link_id);
6181             if (nvswitch_lib_notify_client_events(device,
6182                         NVSWITCH_DEVICE_EVENT_PORT_UP) != NVL_SUCCESS)
6183             {
6184                 NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify PORT_UP event. LinkId %d\n",
6185                             __FUNCTION__, link_id);
6186             }
6187 
6188             //
6189             // When a link comes up ensure that we finish off the post-training tasks:
6190             // -- enabling per-link DL interrupts
6191             // -- releasing buffer_ready on the link
6192             //
6193             nvswitch_corelib_training_complete_ls10(link);
6194             nvswitch_init_buffer_ready(device, link, NV_TRUE);
6195         }
6196     }
6197 
6198     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
6199     NVSWITCH_LINK_WR32(device, link_id, NVLIPT_LNK, _NVLIPT_LNK, _INTR_STATUS, pending);
6200 
6201     if (unhandled != 0)
6202     {
6203         NVSWITCH_PRINT(device, WARN,
6204                        "%s: Unhandled NVLIPT_LNK STATUS interrupts, pending: 0x%x enabled: 0x%x.\n",
6205                        __FUNCTION__, pending, enabled);
6206         return -NVL_MORE_PROCESSING_REQUIRED;
6207     }
6208 
6209     return NVL_SUCCESS;
6210 }
6211 
//
// Service NVLIPT link _ERR_STATUS_0 nonfatal interrupts for one link.
//
// Reports each recognized link-state / request error; _FAILEDMINIONREQUEST
// is deferred into the per-link deferred-error state instead of being
// reported immediately. Clears _ERR_FIRST_0 and _ERR_STATUS_0 when done.
//
// Returns:
//   -NVL_NOT_FOUND                if no enabled interrupt is pending
//   -NVL_MORE_PROCESSING_REQUIRED if an unrecognized bit was pending
//   NVL_SUCCESS                   otherwise
//
static NvlStatus
_nvswitch_service_nvlipt_lnk_nonfatal_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, unhandled;

    report.raw_pending = NVSWITCH_LINK_RD32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_LINK_RD32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_NON_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable;

    // Only service bits that are both pending and enabled for nonfatal reporting
    pending = report.raw_pending & report.mask;
    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_LINK_RD32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FIRST_0);

    bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _ILLEGALLINKSTATEREQUEST, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_ILLEGALLINKSTATEREQUEST, "_HW_NVLIPT_LNK_ILLEGALLINKSTATEREQUEST");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _FAILEDMINIONREQUEST, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // Deferred rather than reported immediately: record the bit and
        // schedule the deferred-link-errors task.
        chip_device->deferredLinkErrors[link].nonFatalIntrMask.liptLnk |= bit;
        _nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _RESERVEDREQUESTVALUE, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_RESERVEDREQUESTVALUE, "_RESERVEDREQUESTVALUE");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _LINKSTATEWRITEWHILEBUSY, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_LINKSTATEWRITEWHILEBUSY, "_LINKSTATEWRITEWHILEBUSY");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _LINK_STATE_REQUEST_TIMEOUT, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_LINK_STATE_REQUEST_TIMEOUT, "_LINK_STATE_REQUEST_TIMEOUT");
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _WRITE_TO_LOCKED_SYSTEM_REG_ERR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_WRITE_TO_LOCKED_SYSTEM_REG_ERR, "_WRITE_TO_LOCKED_SYSTEM_REG_ERR");
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Clear the "first error" latch for the serviced bits, then the status bits
    if (report.raw_first & report.mask)
    {
        NVSWITCH_LINK_WR32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FIRST_0,
            report.raw_first & report.mask);
    }
    NVSWITCH_LINK_WR32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLIPT_LNK NON_FATAL interrupts, pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
6299 
6300 static NvlStatus
6301 _nvswitch_service_nvlipt_link_nonfatal_ls10
6302 (
6303     nvswitch_device *device,
6304     NvU32 instance
6305 )
6306 {
6307     NvU32 i, globalLink, bit, intrLink;
6308     NvU32 interruptingLinks = 0;
6309     NvU32 lnkStatusChangeLinks = 0;
6310     NvlStatus status;
6311     NvU64 link_enable_mask;
6312 
6313     link_enable_mask = ((NvU64)device->regkeys.link_enable_mask2 << 32 |
6314         (NvU64)device->regkeys.link_enable_mask);
6315     for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i)
6316     {
6317         globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i;
6318         if ((NVBIT64(globalLink) & link_enable_mask) == 0)
6319         {
6320             continue;
6321         }
6322         intrLink = NVSWITCH_LINK_RD32(device, globalLink, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0);
6323 
6324         if(intrLink)
6325         {
6326             interruptingLinks |= NVBIT(i);
6327         }
6328 
6329        intrLink = NVSWITCH_LINK_RD32(device, globalLink, NVLIPT_LNK, _NVLIPT_LNK, _INTR_STATUS);
6330 
6331         if(intrLink)
6332         {
6333             lnkStatusChangeLinks |= NVBIT(i);
6334         }
6335     }
6336 
6337     if(lnkStatusChangeLinks)
6338     {
6339         for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i)
6340         {
6341             bit = NVBIT(i);
6342             globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i;
6343             if (nvswitch_test_flags(lnkStatusChangeLinks, bit))
6344             {
6345                 if( _nvswitch_service_nvlipt_lnk_status_ls10(device, instance, globalLink) != NVL_SUCCESS)
6346                 {
6347                     NVSWITCH_PRINT(device, WARN, "%s: Could not process nvlipt link status interrupt. Continuing. LinkId %d\n",
6348                             __FUNCTION__, globalLink);
6349                 }
6350             }
6351         }
6352     }
6353 
6354     if(interruptingLinks)
6355     {
6356         for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i)
6357         {
6358             bit = NVBIT(i);
6359             globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i;
6360             if (nvswitch_test_flags(interruptingLinks, bit))
6361             {
6362                 status = _nvswitch_service_nvlipt_lnk_nonfatal_ls10(device, instance, globalLink);
6363                 if (status != NVL_SUCCESS && status != -NVL_NOT_FOUND)
6364                 {
6365                     return -NVL_MORE_PROCESSING_REQUIRED;
6366                 }
6367             }
6368         }
6369         return NVL_SUCCESS;
6370     }
6371     else
6372     {
6373         return -NVL_NOT_FOUND;
6374     }
6375 }
6376 
6377 
6378 NvlStatus
6379 _nvswitch_service_minion_fatal_ls10
6380 (
6381     nvswitch_device *device,
6382     NvU32 instance
6383 )
6384 {
6385     NvU32 pending, bit, unhandled, mask;
6386 
6387     pending = NVSWITCH_MINION_RD32_LS10(device, instance, _MINION, _MINION_INTR);
6388     mask =  NVSWITCH_MINION_RD32_LS10(device, instance, _MINION, _MINION_INTR_STALL_EN);
6389 
6390     // Don't consider MINION Link interrupts in this handler
6391     mask &= ~(DRF_NUM(_MINION, _MINION_INTR_STALL_EN, _LINK, NV_MINION_MINION_INTR_STALL_EN_LINK_ENABLE_ALL));
6392 
6393     pending &= mask;
6394 
6395     if (pending == 0)
6396     {
6397         return -NVL_NOT_FOUND;
6398     }
6399 
6400     unhandled = pending;
6401 
6402     bit = DRF_NUM(_MINION, _MINION_INTR, _FALCON_STALL, 0x1);
6403     if (nvswitch_test_flags(pending, bit))
6404     {
6405         if (nvswitch_minion_service_falcon_interrupts_ls10(device, instance) == NVL_SUCCESS)
6406         {
6407             nvswitch_clear_flags(&unhandled, bit);
6408         }
6409     }
6410 
6411     bit =  DRF_NUM(_MINION, _MINION_INTR, _NONFATAL, 0x1);
6412     if (nvswitch_test_flags(pending, bit))
6413     {
6414         NVSWITCH_PRINT(device, ERROR, "%s: servicing minion nonfatal interrupt\n",
6415          __FUNCTION__);
6416         NVSWITCH_MINION_WR32_LS10(device, instance, _MINION, _MINION_INTR, bit);
6417         nvswitch_clear_flags(&unhandled, bit);
6418     }
6419 
6420     bit =  DRF_NUM(_MINION, _MINION_INTR, _FATAL, 0x1);
6421     if (nvswitch_test_flags(pending, bit))
6422     {
6423         NVSWITCH_PRINT(device, ERROR, "%s: servicing minion fatal interrupt\n",
6424          __FUNCTION__);
6425         NVSWITCH_MINION_WR32_LS10(device, instance, _MINION, _MINION_INTR, bit);
6426         nvswitch_clear_flags(&unhandled, bit);
6427     }
6428 
6429     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
6430 
6431     if (unhandled != 0)
6432     {
6433         return -NVL_MORE_PROCESSING_REQUIRED;
6434     }
6435 
6436     return NVL_SUCCESS;
6437 }
6438 
6439 static NvlStatus
6440 _nvswitch_service_nvlw_nonfatal_ls10
6441 (
6442     nvswitch_device *device,
6443     NvU32 instance
6444 )
6445 {
6446     NvlStatus status[3];
6447 
6448     status[0] = _nvswitch_service_nvldl_nonfatal_ls10(device, instance);
6449     status[1] = _nvswitch_service_nvltlc_nonfatal_ls10(device, instance);
6450     status[2] = _nvswitch_service_nvlipt_link_nonfatal_ls10(device, instance);
6451 
6452     if ((status[0] != NVL_SUCCESS) && (status[0] != -NVL_NOT_FOUND) &&
6453         (status[1] != NVL_SUCCESS) && (status[1] != -NVL_NOT_FOUND) &&
6454         (status[2] != NVL_SUCCESS) && (status[2] != -NVL_NOT_FOUND))
6455     {
6456         return -NVL_MORE_PROCESSING_REQUIRED;
6457     }
6458 
6459     return NVL_SUCCESS;
6460 }
6461 
#if 0
//
// Currently compiled out: SOE fatal interrupt servicing.
// Delegates to the SOE HAL service routine; nonzero means the interrupt
// needs further processing.
//
static NvlStatus
_nvswitch_service_soe_fatal_ls10
(
    nvswitch_device *device
)
{
    // We only support 1 SOE as of LS10.
    if (soeService_HAL(device, (PSOE)device->pSoe) != 0)
    {
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
#endif  //0
6478 
//
// Service NVLIPT link _ERR_STATUS_0 fatal interrupts for one link.
//
// Reports the recognized sleep/reset-sequencer errors, disables reporting of
// the serviced bits if a fatal error has already occurred on the link, then
// clears _ERR_FIRST_0 and _ERR_STATUS_0.
//
// Returns:
//   -NVL_NOT_FOUND                if no enabled interrupt is pending
//   -NVL_MORE_PROCESSING_REQUIRED if an unrecognized bit was pending
//   NVL_SUCCESS                   otherwise
//
static NvlStatus
_nvswitch_service_nvlipt_lnk_fatal_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, bit, unhandled;

    report.raw_pending = NVSWITCH_LINK_RD32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0);
    report.raw_enable = NVSWITCH_LINK_RD32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FATAL_REPORT_EN_0);
    report.mask = report.raw_enable;

    // Only service bits that are both pending and enabled for fatal reporting
    pending = report.raw_pending & report.mask;
    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;
    report.raw_first = NVSWITCH_LINK_RD32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FIRST_0);

    bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _SLEEPWHILEACTIVELINK, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLIPT_LNK_SLEEPWHILEACTIVELINK, "No non-empty link is detected", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _RSTSEQ_PHYCTL_TIMEOUT, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLIPT_LNK_RSTSEQ_PHYCTL_TIMEOUT, "Reset sequencer timed out waiting for a handshake from PHYCTL", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _RSTSEQ_CLKCTL_TIMEOUT, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_NVLIPT_LNK_RSTSEQ_CLKCTL_TIMEOUT, "Reset sequencer timed out waiting for a handshake from CLKCTL", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    if (device->link[link].fatal_error_occurred)
    {
        // pending is a subset of raw_enable here, so XOR clears exactly
        // the serviced enable bits.
        NVSWITCH_LINK_WR32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FATAL_REPORT_EN_0,
                report.raw_enable ^ pending);
    }

    // clear interrupts
    if (report.raw_first & report.mask)
    {
        NVSWITCH_LINK_WR32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FIRST_0,
                report.raw_first & report.mask);
    }
    NVSWITCH_LINK_WR32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0, pending);

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLIPT_LNK FATAL interrupts, pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
6551 
6552 static NvlStatus
6553 _nvswitch_service_nvlipt_link_fatal_ls10
6554 (
6555     nvswitch_device *device,
6556     NvU32 instance
6557 )
6558 {
6559     NvU32 i, globalLink, bit, intrLink;
6560     NvU32 interruptingLinks = 0;
6561 
6562     //read in error status of current link
6563     for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i)
6564     {
6565         globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i;
6566 
6567         intrLink = NVSWITCH_LINK_RD32(device, globalLink, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0);
6568 
6569         if(intrLink)
6570         {
6571             interruptingLinks |= NVBIT(i);
6572         }
6573     }
6574 
6575     if(interruptingLinks)
6576     {
6577         for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i)
6578         {
6579             bit = NVBIT(i);
6580             globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i;
6581             if (nvswitch_test_flags(interruptingLinks, bit))
6582             {
6583                 if( _nvswitch_service_nvlipt_lnk_fatal_ls10(device, instance, globalLink) != NVL_SUCCESS)
6584                 {
6585                     return -NVL_MORE_PROCESSING_REQUIRED;
6586                 }
6587             }
6588         }
6589         return NVL_SUCCESS;
6590     }
6591     else
6592     {
6593         return -NVL_NOT_FOUND;
6594     }
6595 }
6596 
6597 static NvlStatus
6598 _nvswitch_service_nvlw_fatal_ls10
6599 (
6600     nvswitch_device *device,
6601     NvU32 instance
6602 )
6603 {
6604     NvlStatus status[6];
6605 
6606     status[0] = device->hal.nvswitch_service_minion_link(device, instance);
6607     status[1] = _nvswitch_service_nvldl_fatal_ls10(device, instance);
6608     status[2] = _nvswitch_service_nvltlc_fatal_ls10(device, instance);
6609     status[3] = _nvswitch_service_minion_fatal_ls10(device, instance);
6610     status[4] = _nvswitch_service_nvlipt_common_fatal_ls10(device, instance);
6611     status[5] = _nvswitch_service_nvlipt_link_fatal_ls10(device, instance);
6612 
6613 
6614     if (status[0] != NVL_SUCCESS && status[0] != -NVL_NOT_FOUND &&
6615         status[1] != NVL_SUCCESS && status[1] != -NVL_NOT_FOUND &&
6616         status[2] != NVL_SUCCESS && status[2] != -NVL_NOT_FOUND &&
6617         status[3] != NVL_SUCCESS && status[3] != -NVL_NOT_FOUND &&
6618         status[4] != NVL_SUCCESS && status[4] != -NVL_NOT_FOUND &&
6619         status[5] != NVL_SUCCESS && status[5] != -NVL_NOT_FOUND)
6620     {
6621         return -NVL_MORE_PROCESSING_REQUIRED;
6622     }
6623 
6624     return NVL_SUCCESS;
6625 }
6626 
6627 /*
6628  * @Brief : Enable top level HW interrupts.
6629  *
6630  * @Description :
6631  *
6632  * @param[in] device        operate on this device
6633  */
void
nvswitch_lib_enable_interrupts_ls10
(
    nvswitch_device *device
)
{
    // NPG trees: enable the FATAL/NON_FATAL leaves; CORRECTABLE stays masked (0).
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_SET(NV_CTRL_CPU_INTR_NPG_FATAL_IDX), 0xFFFF);
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_SET(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX), 0xFFFF);
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_SET(NV_CTRL_CPU_INTR_NPG_CORRECTABLE_IDX), 0);

    // NVLW trees: enable the FATAL/NON_FATAL leaves; CORRECTABLE stays masked (0).
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_SET(NV_CTRL_CPU_INTR_NVLW_FATAL_IDX), 0xFFFF);
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_SET(NV_CTRL_CPU_INTR_NVLW_NON_FATAL_IDX), 0xFFFF);
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_SET(NV_CTRL_CPU_INTR_NVLW_CORRECTABLE_IDX), 0);

    // NXBAR FATAL leaf: 0x7 enables the three NXBAR leaf bits.
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_SET(NV_CTRL_CPU_INTR_NXBAR_FATAL_IDX), 0x7);

    // UNITS leaf: clear all enables first, then enable exactly the units that
    // nvswitch_lib_service_interrupts_ls10 knows how to service.
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_UNITS_IDX), 0xFFFFFFFF);
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_SET(NV_CTRL_CPU_INTR_UNITS_IDX),
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PMGR_HOST, 1) |
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PTIMER, 1) |
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PTIMER_ALARM, 1) |
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _XTL_CPU, 1) |
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _XAL_EP, 1) |
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PRIV_RING, 1));

    // Finally open the top-level interrupt gate.
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_TOP_EN_SET(0), 0xFFFFFFFF);
}
6661 
6662 /*
6663  * @Brief : Disable top level HW interrupts.
6664  *
6665  * @Description :
6666  *
6667  * @param[in] device        operate on this device
6668  */
void
nvswitch_lib_disable_interrupts_ls10
(
    nvswitch_device *device
)
{
    // NPG trees: mask the FATAL/NON_FATAL leaves (mirror of the enable path;
    // the CORRECTABLE write of 0 clears no enable bits).
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_FATAL_IDX), 0xFFFF);
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX), 0xFFFF);
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_CORRECTABLE_IDX), 0);

    // NVLW trees: mask the FATAL/NON_FATAL leaves.
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NVLW_FATAL_IDX), 0xFFFF);
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NVLW_NON_FATAL_IDX), 0xFFFF);
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NVLW_CORRECTABLE_IDX), 0);

    // NXBAR FATAL leaf: mask the three NXBAR leaf bits.
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NXBAR_FATAL_IDX), 0x7);

    // UNITS leaf: mask the same unit set enabled in
    // nvswitch_lib_enable_interrupts_ls10.
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_UNITS_IDX),
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PMGR_HOST, 1) |
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PTIMER, 1) |
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PTIMER_ALARM, 1) |
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _XTL_CPU, 1) |
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _XAL_EP, 1) |
        DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PRIV_RING, 1));

    // Close the top-level interrupt gate.
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_TOP_EN_CLEAR(0), 0xFFFFFFFF);
}
6695 
6696 //
6697 // Check if there are interrupts pending.
6698 //
6699 // On silicon/emulation we only use MSIs which are not shared, so this
6700 // function does not need to be called.
6701 //
6702 NvlStatus
6703 nvswitch_lib_check_interrupts_ls10
6704 (
6705     nvswitch_device *device
6706 )
6707 {
6708     NvlStatus retval = NVL_SUCCESS;
6709     NvU32 val;
6710 
6711     val = NVSWITCH_ENG_RD32(device, GIN, , 0, _CTRL, _CPU_INTR_TOP(0));
6712     if (DRF_NUM(_CTRL, _CPU_INTR_TOP, _VALUE, val) != 0)
6713     {
6714         retval = -NVL_MORE_PROCESSING_REQUIRED;
6715     }
6716 
6717     return retval;
6718 }
6719 
6720 static void
6721 _nvswitch_retrigger_engine_intr_ls10
6722 (
6723     nvswitch_device *device
6724 )
6725 {
6726 
6727     // re-trigger engine to gin interrupts for CPR and NPG on the FATAL and NONFATAL trees
6728     NVSWITCH_BCAST_WR32_LS10(device, CPR, _CPR_SYS, _INTR_RETRIGGER(0), 1);
6729     NVSWITCH_BCAST_WR32_LS10(device, CPR, _CPR_SYS, _INTR_RETRIGGER(1), 1);
6730 
6731     NVSWITCH_BCAST_WR32_LS10(device, NPG, _NPG, _INTR_RETRIGGER(0), 1);
6732     NVSWITCH_BCAST_WR32_LS10(device, NPG, _NPG, _INTR_RETRIGGER(1), 1);
6733 }
6734 
6735 void
6736 nvswitch_service_minion_all_links_ls10
6737 (
6738     nvswitch_device *device
6739 )
6740 {
6741     NvU32 val, i;
6742 
6743     // Check NVLW
6744     val = NVSWITCH_ENG_RD32(device, GIN, , 0, _CTRL, _CPU_INTR_NVLW_FATAL);
6745     val = DRF_NUM(_CTRL, _CPU_INTR_NVLW_FATAL, _MASK, val);
6746     if (val != 0)
6747     {
6748         NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL,
6749             _CPU_INTR_LEAF(NV_CTRL_CPU_INTR_NVLW_FATAL_IDX), val);
6750 
6751         for (i = 0; i < DRF_SIZE(NV_CTRL_CPU_INTR_NVLW_FATAL_MASK); i++)
6752         {
6753             if (val & NVBIT(i))
6754                 (void)_nvswitch_service_nvlw_fatal_ls10(device, i);
6755         }
6756     }
6757 }
6758 
6759 //
6760 // Service interrupt and re-enable interrupts. Interrupts should disabled when
6761 // this is called.
6762 //
NvlStatus
nvswitch_lib_service_interrupts_ls10
(
    nvswitch_device *device
)
{
    NvlStatus   status = NVL_SUCCESS;
    NvlStatus   return_status = NVL_SUCCESS;
    NvU32 val;
    NvU32 i;

    //
    // Interrupt handler steps:
    // 1. Read Leaf interrupt
    // 2. Clear leaf interrupt
    // 3. Run leaf specific interrupt handler
    //
    // A handler failure is recorded in return_status but does not abort the
    // remaining leaves; the last failure wins.
    //
    // Check NVLW FATAL
    val = NVSWITCH_ENG_RD32(device, GIN, , 0, _CTRL, _CPU_INTR_NVLW_FATAL);
    val = DRF_NUM(_CTRL, _CPU_INTR_NVLW_FATAL, _MASK, val);
    if (val != 0)
    {
        NVSWITCH_PRINT(device, INFO, "%s: NVLW FATAL interrupts pending = 0x%x\n",
            __FUNCTION__, val);

        NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF(NV_CTRL_CPU_INTR_NVLW_FATAL_IDX), val);

        // One leaf bit per NVLW instance.
        for (i = 0; i < DRF_SIZE(NV_CTRL_CPU_INTR_NVLW_FATAL_MASK); i++)
        {
            if (val & NVBIT(i))
            {
                status = _nvswitch_service_nvlw_fatal_ls10(device, i);
                if (status != NVL_SUCCESS)
                {
                    NVSWITCH_PRINT(device, INFO, "%s: NVLW[%d] FATAL interrupt handling status = %d\n",
                        __FUNCTION__, i, status);
                    return_status = status;
                }
            }
        }
    }

    // Check NVLW NON_FATAL
    val = NVSWITCH_ENG_RD32(device, GIN, , 0, _CTRL, _CPU_INTR_NVLW_NON_FATAL);
    val = DRF_NUM(_CTRL, _CPU_INTR_NVLW_NON_FATAL, _MASK, val);
    if (val != 0)
    {
        NVSWITCH_PRINT(device, INFO, "%s: NVLW NON_FATAL interrupts pending = 0x%x\n",
            __FUNCTION__, val);
        NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF(NV_CTRL_CPU_INTR_NVLW_NON_FATAL_IDX), val);
        for (i = 0; i < DRF_SIZE(NV_CTRL_CPU_INTR_NVLW_NON_FATAL_MASK); i++)
        {
            if (val & NVBIT(i))
            {
                status = _nvswitch_service_nvlw_nonfatal_ls10(device, i);
                if (status != NVL_SUCCESS)
                {
                    NVSWITCH_PRINT(device, INFO, "%s: NVLW[%d] NON_FATAL interrupt handling status = %d\n",
                        __FUNCTION__, i, status);
                    return_status = status;
                }
            }
        }
    }

    // NVLW CORRECTABLE is never enabled (see nvswitch_lib_enable_interrupts_ls10),
    // so a pending bit here is unexpected and only reported, not serviced.
    val = NVSWITCH_ENG_RD32(device, GIN, , 0, _CTRL, _CPU_INTR_NVLW_CORRECTABLE);
    val = DRF_NUM(_CTRL, _CPU_INTR_NVLW_CORRECTABLE, _MASK, val);
    if (val != 0)
    {
        NVSWITCH_PRINT(device, ERROR, "%s: NVLW CORRECTABLE interrupts pending = 0x%x\n",
            __FUNCTION__, val);
        return_status = -NVL_MORE_PROCESSING_REQUIRED;
    }

    // Check NPG
    val = NVSWITCH_ENG_RD32(device, GIN, , 0, _CTRL, _CPU_INTR_NPG_FATAL);
    val = DRF_NUM(_CTRL, _CPU_INTR_NPG_FATAL, _MASK, val);
    if (val != 0)
    {
        NVSWITCH_PRINT(device, INFO, "%s: NPG FATAL interrupts pending = 0x%x\n",
            __FUNCTION__, val);
        NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF(NV_CTRL_CPU_INTR_NPG_FATAL_IDX), val);
        for (i = 0; i < DRF_SIZE(NV_CTRL_CPU_INTR_NPG_FATAL_MASK); i++)
        {
            if (val & NVBIT(i))
            {
                status = _nvswitch_service_npg_fatal_ls10(device, i);
                if (status != NVL_SUCCESS)
                {
                    NVSWITCH_PRINT(device, INFO, "%s: NPG[%d] FATAL interrupt handling status = %d\n",
                        __FUNCTION__, i, status);
                    return_status = status;
                }
            }
        }
    }

    val = NVSWITCH_ENG_RD32(device, GIN, , 0, _CTRL, _CPU_INTR_NPG_NON_FATAL);
    val = DRF_NUM(_CTRL, _CPU_INTR_NPG_NON_FATAL, _MASK, val);
    if (val != 0)
    {
        NVSWITCH_PRINT(device, INFO, "%s: NPG NON_FATAL interrupts pending = 0x%x\n",
            __FUNCTION__, val);
        NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX), val);
        for (i = 0; i < DRF_SIZE(NV_CTRL_CPU_INTR_NPG_NON_FATAL_MASK); i++)
        {
            if (val & NVBIT(i))
            {
                status = _nvswitch_service_npg_nonfatal_ls10(device, i);
                if (status != NVL_SUCCESS)
                {
                    NVSWITCH_PRINT(device, INFO, "%s: NPG[%d] NON_FATAL interrupt handling status = %d\n",
                        __FUNCTION__, i, status);
                    return_status = status;
                }
            }
        }
    }

    // NPG CORRECTABLE is never enabled; report-only, same as NVLW CORRECTABLE.
    val = NVSWITCH_ENG_RD32(device, GIN, , 0, _CTRL, _CPU_INTR_NPG_CORRECTABLE);
    val = DRF_NUM(_CTRL, _CPU_INTR_NPG_CORRECTABLE, _MASK, val);
    if (val != 0)
    {
        NVSWITCH_PRINT(device, ERROR, "%s: NPG CORRECTABLE interrupts pending = 0x%x\n",
            __FUNCTION__, val);
        return_status = -NVL_MORE_PROCESSING_REQUIRED;
    }

    // Check NXBAR
    val = NVSWITCH_ENG_RD32(device, GIN, , 0, _CTRL, _CPU_INTR_NXBAR_FATAL);
    val = DRF_NUM(_CTRL, _CPU_INTR_NXBAR_FATAL, _MASK, val);
    if (val != 0)
    {
        NVSWITCH_PRINT(device, INFO, "%s: NXBAR FATAL interrupts pending = 0x%x\n",
            __FUNCTION__, val);
        NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF(NV_CTRL_CPU_INTR_NXBAR_FATAL_IDX), val);
        for (i = 0; i < DRF_SIZE(NV_CTRL_CPU_INTR_NXBAR_FATAL_MASK); i++)
        {
            if (val & NVBIT(i))
            {
                status = _nvswitch_service_nxbar_fatal_ls10(device, i);
                if (status != NVL_SUCCESS)
                {
                    NVSWITCH_PRINT(device, INFO, "%s: NXBAR[%d] FATAL interrupt handling status = %d\n",
                        __FUNCTION__, i, status);
                    return_status = status;
                }
            }
        }
    }

    // Check UNITS: only PRIV_RING has a real handler; the other enabled units
    // are logged as errors and flagged for further processing.
    val = NVSWITCH_ENG_RD32(device, GIN, , 0, _CTRL, _CPU_INTR_UNITS);
    if (val != 0)
    {
        NVSWITCH_PRINT(device, INFO, "%s: UNIT interrupts pending = 0x%x\n",
            __FUNCTION__, val);

        NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF(NV_CTRL_CPU_INTR_UNITS_IDX), val);
        if (FLD_TEST_DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PMGR_HOST, 1, val))
        {
            NVSWITCH_PRINT(device, ERROR, "%s: _PMGR_HOST interrupt pending\n",
                __FUNCTION__);
            return_status = -NVL_MORE_PROCESSING_REQUIRED;
        }
        if (FLD_TEST_DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PTIMER, 1, val))
        {
            NVSWITCH_PRINT(device, ERROR, "%s: _PTIMER interrupt pending\n",
                __FUNCTION__);
            return_status = -NVL_MORE_PROCESSING_REQUIRED;
        }
        if (FLD_TEST_DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PTIMER_ALARM, 1, val))
        {
            NVSWITCH_PRINT(device, ERROR, "%s: _PTIMER_ALARM interrupt pending\n",
                __FUNCTION__);
            return_status = -NVL_MORE_PROCESSING_REQUIRED;
        }
        if (FLD_TEST_DRF_NUM(_CTRL, _CPU_INTR_UNITS, _XTL_CPU, 1, val))
        {
            NVSWITCH_PRINT(device, ERROR, "%s: _XTL_CPU interrupt pending\n",
                __FUNCTION__);
            return_status = -NVL_MORE_PROCESSING_REQUIRED;
        }
        if (FLD_TEST_DRF_NUM(_CTRL, _CPU_INTR_UNITS, _XAL_EP, 1, val))
        {
            NVSWITCH_PRINT(device, ERROR, "%s: _XAL_EP interrupt pending\n",
                __FUNCTION__);
            return_status = -NVL_MORE_PROCESSING_REQUIRED;
        }
        if (FLD_TEST_DRF_NUM(_CTRL, _CPU_INTR_UNITS, _PRIV_RING, 1, val))
        {
            status = _nvswitch_service_priv_ring_ls10(device);
            if (status != NVL_SUCCESS)
            {
                NVSWITCH_PRINT(device, ERROR, "%s: Problem handling PRI errors\n",
                    __FUNCTION__);
                return_status = status;
            }
        }
    }

    // step 4 -- retrigger engine interrupts
    _nvswitch_retrigger_engine_intr_ls10(device);

    // step 5 -- retrigger top level GIN interrupts
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_TOP_EN_CLEAR(0), 0xFFFFFFFF);
    NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_TOP_EN_SET(0), 0xFFFFFFFF);

    return return_status;
}
6971 
6972 /*
6973  * Initialize interrupt tree HW for all units.
6974  *
6975  * Init and servicing both depend on bits matching across STATUS/MASK
6976  * and IErr STATUS/LOG/REPORT/CONTAIN registers.
6977  */
void
nvswitch_initialize_interrupt_tree_ls10
(
    nvswitch_device *device
)
{
    NvU64 link_mask = nvswitch_get_enabled_link_mask(device);
    NvU32 i, val;

    // NPG/NPORT
    _nvswitch_initialize_nport_interrupts_ls10(device);

    // NXBAR
    _nvswitch_initialize_nxbar_interrupts_ls10(device);

    // NVLW per-link masks, tree 0: enable every interrupt category for each
    // enabled link (read-modify-write so unrelated mask bits are preserved).
    FOR_EACH_INDEX_IN_MASK(64, i, link_mask)
    {
        val = NVSWITCH_LINK_RD32(device, i,
                  NVLW, _NVLW, _LINK_INTR_0_MASK(i));
        val = FLD_SET_DRF(_NVLW, _LINK_INTR_0_MASK, _FATAL,       _ENABLE, val);
        val = FLD_SET_DRF(_NVLW, _LINK_INTR_0_MASK, _NONFATAL,    _ENABLE, val);
        val = FLD_SET_DRF(_NVLW, _LINK_INTR_0_MASK, _CORRECTABLE, _ENABLE, val);
        val = FLD_SET_DRF(_NVLW, _LINK_INTR_0_MASK, _INTR0,       _ENABLE, val);
        val = FLD_SET_DRF(_NVLW, _LINK_INTR_0_MASK, _INTR1,       _ENABLE, val);
        NVSWITCH_LINK_WR32(device, i, NVLW, _NVLW, _LINK_INTR_0_MASK(i), val);
    }
    FOR_EACH_INDEX_IN_MASK_END;

    // NVLW per-link masks, tree 1: same categories as tree 0.
    FOR_EACH_INDEX_IN_MASK(64, i, link_mask)
    {
        val = NVSWITCH_LINK_RD32(device, i,
                  NVLW, _NVLW, _LINK_INTR_1_MASK(i));
        val = FLD_SET_DRF(_NVLW, _LINK_INTR_1_MASK, _FATAL,       _ENABLE, val);
        val = FLD_SET_DRF(_NVLW, _LINK_INTR_1_MASK, _NONFATAL,    _ENABLE, val);
        val = FLD_SET_DRF(_NVLW, _LINK_INTR_1_MASK, _CORRECTABLE, _ENABLE, val);
        val = FLD_SET_DRF(_NVLW, _LINK_INTR_1_MASK, _INTR0,       _ENABLE, val);
        val = FLD_SET_DRF(_NVLW, _LINK_INTR_1_MASK, _INTR1,       _ENABLE, val);
        NVSWITCH_LINK_WR32(device, i, NVLW, _NVLW, _LINK_INTR_1_MASK(i), val);
    }
    FOR_EACH_INDEX_IN_MASK_END;

    // NVLIPT
    _nvswitch_initialize_nvlipt_interrupts_ls10(device);
}
7022 
7023 //
7024 // Service Nvswitch NVLDL Fatal interrupts
7025 //
NvlStatus
nvswitch_service_nvldl_fatal_link_ls10
(
    nvswitch_device *device,
    NvU32 nvlipt_instance,
    NvU32 link
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NvU32 pending, bit, unhandled;
    NvBool bSkipIntrClear = NV_FALSE;

    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };

    // Snapshot pending interrupts and filter by the stall-enable mask.
    report.raw_pending = NVSWITCH_LINK_RD32(device, link, NVLDL, _NVLDL_TOP, _INTR);
    report.raw_enable = NVSWITCH_LINK_RD32(device, link, NVLDL, _NVLDL_TOP, _INTR_STALL_EN);
    report.mask = report.raw_enable;
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        // No enabled NVLDL fatal interrupt is pending on this link.
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _TX_FAULT_RAM, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_DLPL_TX_FAULT_RAM, "TX Fault Ram", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _TX_FAULT_INTERFACE, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_DLPL_TX_FAULT_INTERFACE, "TX Fault Interface", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _TX_FAULT_SUBLINK_CHANGE, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_DLPL_TX_FAULT_SUBLINK_CHANGE, "TX Fault Sublink Change", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_FAULT_SUBLINK_CHANGE, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_DLPL_RX_FAULT_SUBLINK_CHANGE, "RX Fault Sublink Change", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_FAULT_DL_PROTOCOL, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_DLPL_RX_FAULT_DL_PROTOCOL, "RX Fault DL Protocol", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_DOWN, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_FAULT_DOWN, "LTSSM Fault Down", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_PROTOCOL, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_PROTOCOL, "LTSSM Protocol Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _PHY_A, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_DLPL_PHY_A, "PHY_A Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _TX_PL_ERROR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_DLPL_TX_PL_ERROR, "TX_PL Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
   }

    bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_PL_ERROR, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        NVSWITCH_REPORT_FATAL(_HW_DLPL_RX_PL_ERROR, "RX_PL Error", NV_FALSE);
        nvswitch_clear_flags(&unhandled, bit);
    }

    //
    // Note: LTSSM_FAULT_UP must be the last interrupt serviced in the NVLDL
    // Fatal tree. The last step of handling this interrupt is going into the
    // reset_and_drain flow for the given link which will shutdown and reset
    // the link. The reset portion will also wipe away any link state including
    // pending DL interrupts. In order to log all error before wiping that state,
    // service all other interrupts before this one
    //
    bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1);
    if (nvswitch_test_flags(pending, bit))
    {
        // Record the fault for deferred reporting before the reset wipes it.
        chip_device->deferredLinkErrors[link].fatalIntrMask.dl |= bit;
        _nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link);

        nvswitch_clear_flags(&unhandled, bit);
        device->hal.nvswitch_reset_and_drain_links(device, NVBIT64(link));

        //
        // Since reset and drain will reset the link, including clearing
        // pending interrupts, skip the clear write below. There are cases
        // where link clocks will not be on after reset and drain so there
        // maybe PRI errors on writing to the register
        //
        bSkipIntrClear = NV_TRUE;
    }

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    // Disable interrupts that have occurred after fatal error.
    if (device->link[link].fatal_error_occurred)
    {
        NVSWITCH_LINK_WR32(device, link, NVLDL, _NVLDL_TOP, _INTR_STALL_EN,
                report.raw_enable ^ pending);
    }

    // Ack the serviced interrupts unless reset_and_drain already reset the link.
    if (!bSkipIntrClear)
    {
        NVSWITCH_LINK_WR32(device, link, NVLDL, _NVLDL_TOP, _INTR, pending);
    }

    if (unhandled != 0)
    {
        NVSWITCH_PRINT(device, WARN,
                "%s: Unhandled NVLDL fatal interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
                 __FUNCTION__, link, pending, report.raw_enable);
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
7172 
/*
 * Service per-link MINION interrupts for the given MINION instance: decode
 * each interrupting link's _NVLINK_LINK_INTR code and report it as fatal,
 * non-fatal, or informational.
 */
NvlStatus
nvswitch_service_minion_link_ls10
(
    nvswitch_device *device,
    NvU32 instance
)
{
    NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
    NvU32 pending, unhandled, minionIntr, linkIntr, reg, enabledLinks, bit;
    NvU32 localLinkIdx, link;

    //
    // _MINION_MINION_INTR shows all interrupts currently at the host on this minion
    // Note: _MINION_MINION_INTR is not used to clear link specific interrupts
    //
    minionIntr = NVSWITCH_MINION_RD32_LS10(device, instance, _MINION, _MINION_INTR);

    // get all possible interrupting links associated with this minion
    report.raw_pending = DRF_VAL(_MINION, _MINION_INTR, _LINK, minionIntr);

    // read in the enabled minion interrupts on this minion
    reg = NVSWITCH_MINION_RD32_LS10(device, instance, _MINION, _MINION_INTR_STALL_EN);

    // get the links with enabled interrupts on this minion
    enabledLinks = DRF_VAL(_MINION, _MINION_INTR_STALL_EN, _LINK, reg);

    report.raw_enable = enabledLinks;
    report.mask = report.raw_enable;

    // pending bit field contains interrupting links after being filtered
    pending = report.raw_pending & report.mask;

    if (pending == 0)
    {
        return -NVL_NOT_FOUND;
    }

    unhandled = pending;

    FOR_EACH_INDEX_IN_MASK(32, localLinkIdx, pending)
    {
        // Convert minion-local link index to the device-global link number.
        link = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + localLinkIdx;
        bit = NVBIT(localLinkIdx);

        // read in the interrupt register for the given link
        linkIntr = NVSWITCH_MINION_LINK_RD32_LS10(device, link, _MINION, _NVLINK_LINK_INTR(localLinkIdx));

        // _STATE must be set for _CODE to be valid
        if (!DRF_VAL(_MINION, _NVLINK_LINK_INTR, _STATE, linkIntr))
        {
            continue;
        }

        NVSWITCH_PRINT(device, INFO,
                "%s: link[%d] {%d, %d} linkIntr = 0x%x\n",
                 __FUNCTION__, link, instance, localLinkIdx, linkIntr);

        //
        // _MINION_INTR_LINK is a read-only register field for the host
        // Host must write 1 to _NVLINK_LINK_INTR_STATE to clear the interrupt on the link
        //
        reg = DRF_NUM(_MINION, _NVLINK_LINK_INTR, _STATE, 1);
        NVSWITCH_MINION_WR32_LS10(device, instance, _MINION, _NVLINK_LINK_INTR(localLinkIdx), reg);

        // Preserve the raw interrupt word for error logging.
        report.data[0] = linkIntr;

        // Dispatch on the MINION-reported interrupt code.
        switch(DRF_VAL(_MINION, _NVLINK_LINK_INTR, _CODE, linkIntr))
        {
            case NV_MINION_NVLINK_LINK_INTR_CODE_NA:
                NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link NA interrupt", NV_FALSE);
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_SWREQ:
                NVSWITCH_PRINT(device, INFO,
                      "%s: Received MINION Link SW Generate interrupt on MINION %d : link %d.\n",
                      __FUNCTION__, instance, link);
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_DLREQ:
                NVSWITCH_REPORT_NONFATAL(_HW_MINION_NONFATAL, "Minion Link DLREQ interrupt");
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_PMDISABLED:
                NVSWITCH_REPORT_NONFATAL(_HW_MINION_NONFATAL, "Minion Link PMDISABLED interrupt");
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_DLCMDFAULT:
                NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link DLCMDFAULT interrupt", NV_FALSE);
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_TLREQ:
                NVSWITCH_REPORT_NONFATAL(_HW_MINION_NONFATAL, "Minion Link TLREQ interrupt");
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_NOINIT:
                NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link NOINIT interrupt", NV_FALSE);
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_NOTIFY:
                NVSWITCH_PRINT(device, INFO,
                      "%s: Received MINION NOTIFY interrupt on MINION %d : link %d.\n",
                      __FUNCTION__, instance, link);
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_LOCAL_CONFIG_ERR:
                NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link Local-Config-Error interrupt", NV_FALSE);
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_NEGOTIATION_CONFIG_ERR:
                NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link Negotiation Config Err Interrupt", NV_FALSE);
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_BADINIT:
                NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link BADINIT interrupt", NV_FALSE);
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_PMFAIL:
                NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link PMFAIL interrupt", NV_FALSE);
                break;
            case NV_MINION_NVLINK_LINK_INTR_CODE_INBAND_BUFFER_AVAILABLE:
            {
                NVSWITCH_PRINT(device, INFO,
                      "Received INBAND_BUFFER_AVAILABLE interrupt on MINION %d,\n", instance);
                nvswitch_minion_receive_inband_data_ls10(device, link);
                break;
            }

            default:
                NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Interrupt code unknown", NV_FALSE);
        }
        nvswitch_clear_flags(&unhandled, bit);

        // Disable interrupt bit for the given link - fatal error occurred before
        if (device->link[link].fatal_error_occurred)
        {
            enabledLinks &= ~bit;
            reg = DRF_NUM(_MINION, _MINION_INTR_STALL_EN, _LINK, enabledLinks);
            NVSWITCH_MINION_LINK_WR32_LS10(device, link, _MINION, _MINION_INTR_STALL_EN, reg);
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    NVSWITCH_UNHANDLED_CHECK(device, unhandled);

    if (unhandled != 0)
    {
        return -NVL_MORE_PROCESSING_REQUIRED;
    }

    return NVL_SUCCESS;
}
7313 
7314