1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "common_nvswitch.h"
25 #include "bios_nvswitch.h"
26 #include "error_nvswitch.h"
27 #include "regkey_nvswitch.h"
28 #include "haldef_nvswitch.h"
29 #include "lr10/lr10.h"
30 #include "lr10/clock_lr10.h"
31 #include "lr10/minion_lr10.h"
32 #include "lr10/soe_lr10.h"
33 #include "lr10/pmgr_lr10.h"
34 #include "lr10/therm_lr10.h"
35 #include "lr10/inforom_lr10.h"
36 #include "lr10/smbpbi_lr10.h"
37 #include "flcn/flcnable_nvswitch.h"
38 #include "soe/soe_nvswitch.h"
39 
40 #include "nvswitch/lr10/dev_nvs_top.h"
41 #include "nvswitch/lr10/dev_pri_ringmaster.h"
42 #include "nvswitch/lr10/dev_pri_ringstation_sys.h"
43 #include "nvswitch/lr10/dev_nvlsaw_ip.h"
44 #include "nvswitch/lr10/dev_nvlsaw_ip_addendum.h"
45 #include "nvswitch/lr10/dev_nvs_master.h"
46 #include "nvswitch/lr10/dev_nvltlc_ip.h"
47 #include "nvswitch/lr10/dev_nvldl_ip.h"
48 #include "nvswitch/lr10/dev_nvlipt_lnk_ip.h"
49 #include "nvswitch/lr10/dev_nvlctrl_ip.h"
50 #include "nvswitch/lr10/dev_npg_ip.h"
51 #include "nvswitch/lr10/dev_npgperf_ip.h"
52 #include "nvswitch/lr10/dev_nport_ip.h"
53 #include "nvswitch/lr10/dev_ingress_ip.h"
54 #include "nvswitch/lr10/dev_tstate_ip.h"
55 #include "nvswitch/lr10/dev_egress_ip.h"
56 #include "nvswitch/lr10/dev_route_ip.h"
57 #include "nvswitch/lr10/dev_therm.h"
58 #include "nvswitch/lr10/dev_soe_ip.h"
59 #include "nvswitch/lr10/dev_route_ip_addendum.h"
60 #include "nvswitch/lr10/dev_minion_ip.h"
61 #include "nvswitch/lr10/dev_minion_ip_addendum.h"
62 #include "nvswitch/lr10/dev_nport_ip_addendum.h"
63 #include "nvswitch/lr10/dev_nxbar_tile_ip.h"
64 #include "nvswitch/lr10/dev_nxbar_tc_global_ip.h"
65 #include "nvswitch/lr10/dev_sourcetrack_ip.h"
66 
67 #include "oob/smbpbi.h"
68 
69 #define DMA_ADDR_WIDTH_LR10     64
70 #define ROUTE_GANG_TABLE_SIZE (1 << DRF_SIZE(NV_ROUTE_REG_TABLE_ADDRESS_INDEX))
71 
/*!
 * @brief Take all valid links out of reset and poll for completion
 *
 * Writes a reset-deassert to every valid NVLIPT_LNK engine, then polls each
 * link (up to 25 ms per link) until its reset status reads back DEASSERTED.
 * Intended only for forced-config bring-up, where MINION does not perform
 * this step during INITPHASE1 (see _nvswitch_setup_chiplib_forced_config_lr10).
 *
 * @param[in] device  nvswitch device
 */
static void
_nvswitch_deassert_link_resets_lr10
(
    nvswitch_device *device
)
{
    NvU32 val, i;
    NVSWITCH_TIMEOUT timeout;
    NvBool           keepPolling;

    NVSWITCH_PRINT(device, WARN,
        "%s: NVSwitch Driver is taking the links out of reset. This should only happen during forced config.\n",
        __FUNCTION__);

    // First pass: request the reset deassert on every valid link.
    for (i = 0; i < NVSWITCH_LINK_COUNT(device); i++)
    {
        if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLIPT_LNK, i)) continue;

        val = NVSWITCH_LINK_RD32_LR10(device, i,
                NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET);
        val = FLD_SET_DRF_NUM(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET, _LINK_RESET,
                          NV_NVLIPT_LNK_RESET_RSTSEQ_LINK_RESET_LINK_RESET_DEASSERT, val);

        NVSWITCH_LINK_WR32_LR10(device, i,
                NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET, val);
    }

    // Second pass: wait for each link to report the deassert completed.
    for (i = 0; i < NVSWITCH_LINK_COUNT(device); i++)
    {
        if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLIPT_LNK, i)) continue;

        // Poll for _RESET_STATUS == _DEASSERTED
        nvswitch_timeout_create(25*NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);

        do
        {
            keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;

            val = NVSWITCH_LINK_RD32_LR10(device, i,
                    NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET);
            if (FLD_TEST_DRF(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET,
                        _LINK_RESET_STATUS, _DEASSERTED, val))
            {
                break;
            }

            nvswitch_os_sleep(1);
        }
        while (keepPolling);

        // A timeout is logged but not fatal; continue with the remaining links.
        if (!FLD_TEST_DRF(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET,
                    _LINK_RESET_STATUS, _DEASSERTED, val))
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Timeout waiting for link %d_LINK_RESET_STATUS == _DEASSERTED\n",
                __FUNCTION__, i);
                // Bug 2974064: Review this timeout handling (fall through)
        }
    }
}
132 
/*!
 * @brief Train a single link to ACTIVE for forced-config operation
 *
 * Enables hardware-driven SAFE and then NVHS auto-training on the link,
 * requests an LTSSM change to ACTIVE, polls briefly for the change to
 * complete, and — if the link reports ACTIVE — sets BUFFER_READY on the
 * link's TLC (NPORT buffer ready is handled elsewhere).
 *
 * @param[in] device  nvswitch device
 * @param[in] linkId  link to train
 */
static void
_nvswitch_train_forced_config_link_lr10
(
    nvswitch_device *device,
    NvU32            linkId
)
{
    NvU32 data, i;
    nvlink_link *link;

    link = nvswitch_get_link(device, linkId);

    // Silently skip invalid or out-of-range links.
    if ((link == NULL) ||
        !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
        (linkId >= NVSWITCH_NVLINK_MAX_LINKS))
    {
        return;
    }

    // Let the hardware take the link to SAFE on its own.
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_TEST, _AUTO_HWCFG, _ENABLE, data);
    NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST, data);

    // Add some delay to let the sim/emu go to SAFE
    NVSWITCH_NSEC_DELAY(400 * NVSWITCH_INTERVAL_1USEC_IN_NS);

    // Then let it proceed to NVHS (high speed).
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_TEST, _AUTO_NVHS, _ENABLE, data);
    NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST, data);

    // Add some delay to let the sim/emu go to HS
    NVSWITCH_NSEC_DELAY(400 * NVSWITCH_INTERVAL_1USEC_IN_NS);

    // Request an LTSSM state change to ACTIVE from any current state.
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _NEWSTATE,      _ACTIVE, data);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _OLDSTATE_MASK, _DONTCARE, data);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _ACTION,        _LTSSM_CHANGE, data);
    NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE, data);

    i = 0;

    // Poll until LINK_CHANGE[1:0] != 2b01.
    // Bounded at 5 iterations (~25 us total); a fault or completion exits early.
    while (i < 5)
    {
        data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE);

        if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_CHANGE, _STATUS, _BUSY, data))
        {
            NVSWITCH_PRINT(device, INFO,
                "%s : Waiting for link %d to go to ACTIVE\n",
                __FUNCTION__, linkId);
        }
        else if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_CHANGE, _STATUS, _FAULT, data))
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s : Fault while changing LINK to ACTIVE. Link = %d\n",
                __FUNCTION__, linkId);
            break;
        }
        else
        {
            break;
        }

        NVSWITCH_NSEC_DELAY(5 * NVSWITCH_INTERVAL_1USEC_IN_NS);
        i++;
    }

    // Verify the final link state rather than trusting the change status.
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_STATE);

    if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_STATE, _STATE, _ACTIVE, data))
    {
        NVSWITCH_PRINT(device, INFO,
            "%s : Link %d is in ACTIVE state, setting BUFFER_READY\n",
            __FUNCTION__, linkId);

        // Set buffer ready only for nvlink TLC and not NPORT
        nvswitch_init_buffer_ready(device, link, NV_FALSE);
    }
    else
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s : Timeout while waiting for link %d to go to ACTIVE\n",
            __FUNCTION__, linkId);
        NVSWITCH_PRINT(device, ERROR,
            "%s : Link %d is in 0x%x state\n",
            __FUNCTION__, linkId,DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, data));
    }

}
223 
/*!
 * @brief Bring up links selected by the forced-config regkeys
 *
 * Builds a 64-bit link mask from the two chiplib_forced_config_link_mask
 * regkeys; no-op when the mask is zero.  Otherwise deasserts link resets
 * and trains each valid, selected link to ACTIVE.
 *
 * @param[in] device  nvswitch device
 */
void
_nvswitch_setup_chiplib_forced_config_lr10
(
    nvswitch_device *device
)
{
    NvU64 links = ((NvU64)device->regkeys.chiplib_forced_config_link_mask) +
                  ((NvU64)device->regkeys.chiplib_forced_config_link_mask2 << 32);
    NvU32 i;

    if (links == 0)
    {
        return;
    }

    //
    // First, take the links out of reset
    //
    // NOTE: On LR10, MINION will take the links out of reset during INITPHASE1
    // On platforms where MINION is not present and/or we want to run with forced
    // config, the driver must de-assert the link reset
    //
    _nvswitch_deassert_link_resets_lr10(device);

    // Next, train the links to ACTIVE/NVHS
    FOR_EACH_INDEX_IN_MASK(64, i, links)
    {
        if (device->link[i].valid)
        {
            _nvswitch_train_forced_config_link_lr10(device, i);
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;
}
258 
259 /*!
260  * @brief Parse packed little endian data and unpack into padded structure
261  *
262  * @param[in]   format          Data format
263  * @param[in]   packedData      Packed little endian data
264  * @param[out]  unpackedData    Unpacked padded structure
265  * @param[out]  unpackedSize    Unpacked data size
266  * @param[out]  fieldsCount     Number of fields
267  *
268  * @return 'NV_OK'
269  */
270 NV_STATUS
271 _nvswitch_devinit_unpack_structure
272 (
273     const char *format,
274     const NvU8 *packedData,
275     NvU32      *unpackedData,
276     NvU32      *unpackedSize,
277     NvU32      *fieldsCount
278 )
279 {
280     NvU32 unpkdSize = 0;
281     NvU32 fields = 0;
282     NvU32 count;
283     NvU32 data;
284     char fmt;
285 
286     while ((fmt = *format++))
287     {
288         count = 0;
289         while ((fmt >= '0') && (fmt <= '9'))
290         {
291             count *= 10;
292             count += fmt - '0';
293             fmt = *format++;
294         }
295         if (count == 0)
296             count = 1;
297 
298         while (count--)
299         {
300             switch (fmt)
301             {
302                 case 'b':
303                     data = *packedData++;
304                     unpkdSize += 1;
305                     break;
306 
307                 case 's':    // signed byte
308                     data = *packedData++;
309                     if (data & 0x80)
310                         data |= ~0xff;
311                     unpkdSize += 1;
312                     break;
313 
314                 case 'w':
315                     data  = *packedData++;
316                     data |= *packedData++ << 8;
317                     unpkdSize += 2;
318                     break;
319 
320                 case 'd':
321                     data  = *packedData++;
322                     data |= *packedData++ << 8;
323                     data |= *packedData++ << 16;
324                     data |= *packedData++ << 24;
325                     unpkdSize += 4;
326                     break;
327 
328                 default:
329                     return NV_ERR_GENERIC;
330             }
331             *unpackedData++ = data;
332             fields++;
333         }
334     }
335 
336     if (unpackedSize != NULL)
337         *unpackedSize = unpkdSize;
338 
339     if (fieldsCount != NULL)
340         *fieldsCount = fields;
341 
342     return NV_OK;
343 }
344 
345 /*!
346  * @brief Calculate packed and unpacked data size based on given data format
347  *
348  * @param[in]   format          Data format
349  * @param[out]  packedSize      Packed data size
350  * @param[out]  unpackedSize    Unpacked data size
351  *
352  */
353 void
354 _nvswitch_devinit_calculate_sizes
355 (
356     const char *format,
357     NvU32      *packedSize,
358     NvU32      *unpackedSize
359 )
360 {
361     NvU32 unpkdSize = 0;
362     NvU32 pkdSize = 0;
363     NvU32 count;
364     char fmt;
365 
366     while ((fmt = *format++))
367     {
368         count = 0;
369         while ((fmt >= '0') && (fmt <= '9'))
370         {
371             count *= 10;
372             count += fmt - '0';
373             fmt = *format++;
374         }
375         if (count == 0)
376             count = 1;
377 
378         switch (fmt)
379         {
380             case 'b':
381                 pkdSize += count * 1;
382                 unpkdSize += count * sizeof(bios_U008);
383                 break;
384 
385             case 's':    // signed byte
386                 pkdSize += count * 1;
387                 unpkdSize += count * sizeof(bios_S008);
388                 break;
389 
390             case 'w':
391                 pkdSize += count * 2;
392                 unpkdSize += count * sizeof(bios_U016);
393                 break;
394 
395             case 'd':
396                 pkdSize += count * 4;
397                 unpkdSize += count * sizeof(bios_U032);
398                 break;
399         }
400     }
401 
402     if (packedSize != NULL)
403         *packedSize = pkdSize;
404 
405     if (unpackedSize != NULL)
406         *unpackedSize = unpkdSize;
407 }
408 
409 /*!
410  * @brief Calculate packed and unpacked data size based on given data format
411  *
412  * @param[in]   format          Data format
413  * @param[out]  packedSize      Packed data size
414  * @param[out]  unpackedSize    Unpacked data size
415  *
416  */
417 
418 NV_STATUS
419 _nvswitch_vbios_read_structure
420 (
421     nvswitch_device *device,
422     void            *structure,
423     NvU32           offset,
424     NvU32           *ppacked_size,
425     const char      *format
426 )
427 {
428     NvU32  packed_size;
429     NvU8  *packed_data;
430     NvU32  unpacked_bytes;
431 
432     // calculate the size of the data as indicated by its packed format.
433     _nvswitch_devinit_calculate_sizes(format, &packed_size, &unpacked_bytes);
434 
435     if (ppacked_size)
436         *ppacked_size = packed_size;
437 
438     //
439     // is 'offset' too big?
440     // happens when we read bad ptrs from fixed addrs in image frequently
441     //
442     if ((offset + packed_size) > device->biosImage.size)
443     {
444         NVSWITCH_PRINT(device, ERROR, "%s: Bad offset in bios read: 0x%x, max is 0x%x, fmt is '%s'\n",
445                        __FUNCTION__, offset, device->biosImage.size, format);
446         return NV_ERR_GENERIC;
447     }
448 
449     packed_data = &device->biosImage.pImage[offset];
450     return _nvswitch_devinit_unpack_structure(format, packed_data, structure,
451                                               &unpacked_bytes, NULL);
452 }
453 
454 
455 NvlStatus
456 nvswitch_vbios_read_structure_lr10
457 (
458     nvswitch_device *device,
459     void            *structure,
460     NvU32           offset,
461     NvU32           *ppacked_size,
462     const char      *format
463 )
464 {
465     if (NV_OK == _nvswitch_vbios_read_structure(device, structure, offset, ppacked_size, format))
466     {
467        return NVL_SUCCESS;
468     }
469     else
470     {
471        return -NVL_ERR_GENERIC;
472     }
473 }
474 
475 NvU8
476 _nvswitch_vbios_read8
477 (
478     nvswitch_device *device,
479     NvU32           offset
480 )
481 {
482     bios_U008 data;     // BiosReadStructure expects 'bios' types
483 
484     _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "b");
485 
486     return (NvU8) data;
487 }
488 
489 NvU16
490 _nvswitch_vbios_read16
491 (
492     nvswitch_device *device,
493     NvU32           offset
494 )
495 {
496     bios_U016 data;     // BiosReadStructure expects 'bios' types
497 
498     _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "w");
499 
500     return (NvU16) data;
501 }
502 
503 
504 NvU32
505 _nvswitch_vbios_read32
506 (
507     nvswitch_device *device,
508     NvU32           offset
509 )
510 {
511     bios_U032 data;     // BiosReadStructure expects 'bios' types
512 
513     _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "d");
514 
515     return (NvU32) data;
516 }
517 
/*!
 * @brief Walk the BIT token table and record the NVLink config table address
 *
 * Reads the BIT header at bios_config->bit_address, iterates its token
 * entries, and for the NVINIT_PTRS token stores the absolute NVLink config
 * table address into bios_config->nvlink_config_table_address.
 *
 * @param[in]     device       nvswitch device
 * @param[in,out] bios_config  provides bit_address/pci_image_address; receives
 *                             nvlink_config_table_address
 *
 * @return 'NV_OK' on success, 'NV_ERR_GENERIC' on a failed token/data read,
 *         or the error from the failed header read
 */
NV_STATUS
_nvswitch_perform_BIT_offset_update
(
    nvswitch_device *device,
    NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
)
{
    BIT_HEADER_V1_00         bitHeader;
    BIT_TOKEN_V1_00          bitToken;
    NV_STATUS                rmStatus;
    NvU32                    dataPointerOffset;
    NvU32 i;

    rmStatus = _nvswitch_vbios_read_structure(device,
                                              (NvU8*) &bitHeader,
                                              bios_config->bit_address,
                                              (NvU32 *) 0,
                                              BIT_HEADER_V1_00_FMT);

    if(rmStatus != NV_OK)
    {
        NVSWITCH_PRINT(device, ERROR,
                       "%s: Failed to read BIT table structure!.\n",
                       __FUNCTION__);
        return rmStatus;
    }

    // Token entries are packed immediately after the header, TokenSize apart.
    for(i=0; i < bitHeader.TokenEntries; i++)
    {
        NvU32 BitTokenLocation = bios_config->bit_address + bitHeader.HeaderSize + (i * bitHeader.TokenSize);
        rmStatus = _nvswitch_vbios_read_structure(device,
                                                 (NvU8*) &bitToken,
                                                 BitTokenLocation,
                                                 (NvU32 *) 0,
                                                 BIT_TOKEN_V1_00_FMT);
        if(rmStatus != NV_OK)
        {
            NVSWITCH_PRINT(device, WARN,
                "%s: Failed to read BIT token %d!\n",
                __FUNCTION__, i);
            return NV_ERR_GENERIC;
        }

        // Token data pointers are relative to the PCI image base.
        dataPointerOffset = (bios_config->pci_image_address + bitToken.DataPtr);
        switch(bitToken.TokenId)
        {
            case BIT_TOKEN_NVINIT_PTRS:
            {
                BIT_DATA_NVINIT_PTRS_V1 nvInitTablePtrs;
                rmStatus = _nvswitch_vbios_read_structure(device,
                                                          (NvU8*) &nvInitTablePtrs,
                                                          dataPointerOffset,
                                                          (NvU32 *) 0,
                                                          BIT_DATA_NVINIT_PTRS_V1_30_FMT);
                if (rmStatus != NV_OK)
                {
                    NVSWITCH_PRINT(device, WARN,
                                   "%s: Failed to read internal data structure\n",
                                   __FUNCTION__);
                    return NV_ERR_GENERIC;
                }
                // Update the retrieved info with device info
                bios_config->nvlink_config_table_address = (nvInitTablePtrs.NvlinkConfigDataPtr + bios_config->pci_image_address);
            }
            break;
        }
    }

    return NV_OK;
}
588 
589 NV_STATUS
590 _nvswitch_validate_BIT_header
591 (
592     nvswitch_device *device,
593     NvU32            bit_address
594 )
595 {
596     NvU32    headerSize = 0;
597     NvU32    chkSum = 0;
598     NvU32    i;
599 
600     //
601     // For now let's assume the Header Size is always at the same place.
602     // We can create something more complex if needed later.
603     //
604     headerSize = (NvU32)_nvswitch_vbios_read8(device, bit_address + BIT_HEADER_SIZE_OFFSET);
605 
606     // Now perform checksum
607     for (i = 0; i < headerSize; i++)
608         chkSum += (NvU32)_nvswitch_vbios_read8(device, bit_address + i);
609 
610     //Byte checksum removes upper bytes
611     chkSum = chkSum & 0xFF;
612 
613     if (chkSum)
614         return NV_ERR_GENERIC;
615 
616     return NV_OK;
617 }
618 
619 
/*!
 * @brief Locate the BIT header within the VBIOS image
 *
 * Scans forward from the PCI image offset for the BIT header ID followed by
 * the BIT signature, records its offset in bios_config->bit_address, and
 * checksums the header.  On a checksum failure the cached BIOS image is
 * discarded (pImage/size zeroed).
 *
 * @param[in]     device       nvswitch device
 * @param[in,out] bios_config  provides pci_image_address; receives bit_address
 *
 * @return 'NV_OK' if a bit_address was recorded, else 'NV_ERR_GENERIC'
 */
NV_STATUS
nvswitch_verify_header
(
    nvswitch_device *device,
    NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
)
{
    NvU32       i;
    NV_STATUS   status = NV_ERR_GENERIC;

    if ((bios_config == NULL) || (!bios_config->pci_image_address))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: PCI Image offset is not identified\n",
            __FUNCTION__);
        return status;
    }

    // attempt to find the init info in the BIOS
    // (stop 3 bytes early so the 16-bit ID read stays inside the image)
    for (i = bios_config->pci_image_address; i < device->biosImage.size - 3; i++)
    {
        NvU16 bitheaderID = _nvswitch_vbios_read16(device, i);
        if (bitheaderID == BIT_HEADER_ID)
        {
            NvU32 signature = _nvswitch_vbios_read32(device, i + 2);
            if (signature == BIT_HEADER_SIGNATURE)
            {
                bios_config->bit_address = i;

                // Checksum BIT to prove accuracy
                // NOTE(review): bit_address stays set even when the checksum
                // fails, so NV_OK is still returned below although the image
                // has been invalidated — confirm this is intended.
                if (NV_OK != _nvswitch_validate_BIT_header(device, bios_config->bit_address))
                {
                    device->biosImage.pImage = 0;
                    device->biosImage.size = 0;
                }
            }
        }
        // only if we find the bit address do we break
        if (bios_config->bit_address)
            break;
    }
    if (bios_config->bit_address)
    {
        status = NV_OK;
    }

    return status;
}
668 
669 NV_STATUS
670 _nvswitch_vbios_update_bit_Offset
671 (
672     nvswitch_device *device,
673     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
674 )
675 {
676     NV_STATUS   status = NV_OK;
677 
678     if (bios_config->bit_address)
679     {
680         goto vbios_update_bit_Offset_done;
681     }
682 
683     status = nvswitch_verify_header(device, bios_config);
684     if (status != NV_OK)
685     {
686         NVSWITCH_PRINT(device, ERROR, "%s: *** BIT header is not found in vbios!\n",
687             __FUNCTION__);
688         goto vbios_update_bit_Offset_done;
689     }
690 
691     if (bios_config->bit_address)
692     {
693 
694         status = _nvswitch_perform_BIT_offset_update(device, bios_config);
695         if (status != NV_OK)
696             goto vbios_update_bit_Offset_done;
697     }
698 
699 vbios_update_bit_Offset_done:
700     return status;
701 }
702 
703 
704 NV_STATUS
705 _nvswitch_vbios_identify_pci_image_loc
706 (
707     nvswitch_device         *device,
708     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
709 )
710 {
711     NV_STATUS   status = NV_OK;
712     NvU32       i;
713 
714     if (bios_config->pci_image_address)
715     {
716         goto vbios_identify_pci_image_loc_done;
717     }
718 
719     // Match the PCI_EXP_ROM_SIGNATURE and followed by the PCI Data structure
720     // with PCIR and matching vendor ID
721     NVSWITCH_PRINT(device, SETUP,
722         "%s: Verifying and extracting PCI Data.\n",
723         __FUNCTION__);
724 
725     // attempt to find the init info in the BIOS
726     for (i = 0; i < (device->biosImage.size - PCI_ROM_HEADER_PCI_DATA_SIZE); i++)
727     {
728         NvU16 pci_rom_sigature = _nvswitch_vbios_read16(device, i);
729 
730         if (pci_rom_sigature == PCI_EXP_ROM_SIGNATURE)
731         {
732             NvU32 pcir_data_dffSet  = _nvswitch_vbios_read16(device, i + PCI_ROM_HEADER_SIZE);  // 0x16 -> 0x18 i.e, including the ROM Signature bytes
733 
734             if (((i + pcir_data_dffSet) + PCI_DATA_STRUCT_SIZE) < device->biosImage.size)
735             {
736                 NvU32 pcirSigature = _nvswitch_vbios_read32(device, (i + pcir_data_dffSet));
737 
738                 if (pcirSigature == PCI_DATA_STRUCT_SIGNATURE)
739                 {
740                     PCI_DATA_STRUCT pciData;
741                     status = _nvswitch_vbios_read_structure(device,
742                                                            (NvU8*) &pciData,
743                                                             i + pcir_data_dffSet,
744                                                             (NvU32 *) 0,
745                                                             PCI_DATA_STRUCT_FMT);
746                     if (status != NV_OK)
747                     {
748                         NVSWITCH_PRINT(device, WARN,
749                                        "%s: Failed to PCI Data for validation\n",
750                                        __FUNCTION__);
751                         goto vbios_identify_pci_image_loc_done;
752                     }
753 
754                     // Validate the vendor details as well
755                     if (pciData.vendorID == PCI_VENDOR_ID_NVIDIA)
756                     {
757                         bios_config->pci_image_address = i;
758                         break;
759                     }
760                 }
761             }
762         }
763     }
764 
765 vbios_identify_pci_image_loc_done:
766     return status;
767 }
768 
/*!
 * @brief Return the cached NVLink config table address
 *
 * Populated by _nvswitch_perform_BIT_offset_update(); callers treat a zero
 * value as "no table found" — presumably bios_config is zero-initialized
 * before discovery (confirm against the allocation site).
 */
NvU32 _nvswitch_get_nvlink_config_address
(
    nvswitch_device         *device,
    NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
)
{
    return bios_config->nvlink_config_table_address;
}
777 
778 NV_STATUS
779 _nvswitch_read_vbios_link_base_entry
780 (
781     nvswitch_device *device,
782     NvU32            tblPtr,
783     NVLINK_CONFIG_DATA_BASEENTRY  *link_base_entry
784 )
785 {
786     NV_STATUS status = NV_ERR_INVALID_PARAMETER;
787     NVLINK_VBIOS_CONFIG_DATA_BASEENTRY vbios_link_base_entry;
788 
789     status = _nvswitch_vbios_read_structure(device, &vbios_link_base_entry, tblPtr, (NvU32 *)0, NVLINK_CONFIG_DATA_BASEENTRY_FMT);
790     if (status != NV_OK)
791     {
792         NVSWITCH_PRINT(device, ERROR,
793             "%s: Error on reading nvlink base entry\n",
794             __FUNCTION__);
795         return status;
796     }
797 
798     link_base_entry->positionId = vbios_link_base_entry.positionId;
799 
800     return status;
801 }
802 
/*!
 * @brief Read NVLink config link entries (v2.0 layout) from the VBIOS image
 *
 * Reads up to 'expected_link_entriesCount' packed link entries starting at
 * 'tblPtr', copies their parameter bytes into 'link_entries', and reports how
 * many were read via 'identified_link_entriesCount'.
 *
 * @return 'NV_OK' when all entries were read; the read error on failure.
 *         Note: if expected_link_entriesCount is 0 the loop never runs and
 *         the initial NV_ERR_INVALID_PARAMETER is returned.
 */
NvlStatus
nvswitch_read_vbios_link_entries_lr10
(
    nvswitch_device              *device,
    NvU32                         tblPtr,
    NvU32                         expected_link_entriesCount,
    NVLINK_CONFIG_DATA_LINKENTRY *link_entries,
    NvU32                        *identified_link_entriesCount
)
{
    NV_STATUS status = NV_ERR_INVALID_PARAMETER;
    NvU32 i;
    NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_20 vbios_link_entry;
    *identified_link_entriesCount = 0;

    for (i = 0; i < expected_link_entriesCount; i++)
    {
        status = _nvswitch_vbios_read_structure(device,
                                                &vbios_link_entry,
                                                tblPtr, (NvU32 *)0,
                                                NVLINK_CONFIG_DATA_LINKENTRY_FMT_20);
        if (status != NV_OK)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Error on reading nvlink entry\n",
                __FUNCTION__);
            return status;
        }
        link_entries[i].nvLinkparam0 = (NvU8)vbios_link_entry.nvLinkparam0;
        link_entries[i].nvLinkparam1 = (NvU8)vbios_link_entry.nvLinkparam1;
        link_entries[i].nvLinkparam2 = (NvU8)vbios_link_entry.nvLinkparam2;
        link_entries[i].nvLinkparam3 = (NvU8)vbios_link_entry.nvLinkparam3;
        link_entries[i].nvLinkparam4 = (NvU8)vbios_link_entry.nvLinkparam4;
        link_entries[i].nvLinkparam5 = (NvU8)vbios_link_entry.nvLinkparam5;
        link_entries[i].nvLinkparam6 = (NvU8)vbios_link_entry.nvLinkparam6;
        // NOTE(review): advances the byte offset by sizeof(struct)/sizeof(NvU32),
        // i.e. the entry size in 32-bit words — presumably this matches the
        // packed entry stride in the image; confirm against the table format.
        tblPtr += (sizeof(NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_20)/sizeof(NvU32));

        NVSWITCH_PRINT(device, NOISY,
            "<<<---- NvLink ID 0x%x ---->>>\n", i);
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 0 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam0, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam0));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 1 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam1, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam1));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 2 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam2, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam2));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 3 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam3, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam3));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 4 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam4, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam4));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 5 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam5, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam5));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 6 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam6, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam6));
        NVSWITCH_PRINT(device, NOISY,
            "<<<---- NvLink ID 0x%x ---->>>\n\n", i);
    }
    *identified_link_entriesCount = i;
    return status;
}
862 
/*
 * @brief Locate and parse the NvLink config table from the VBIOS image.
 *
 * Reads the table header (version 2.0 or 3.0 layouts share the ver_20
 * header format), then walks each base entry and its link entries,
 * filling in bios_config->link_vbios_base_entry[] and
 * bios_config->link_vbios_entry[][].
 *
 * @param[in]     device       the device whose VBIOS is being parsed
 * @param[in,out] bios_config  receives the parsed base/link entries
 *
 * @return NV_OK on success; NV_ERR_GENERIC if the table is missing,
 *         has an unknown version, or the header read fails.
 */
NV_STATUS
_nvswitch_vbios_fetch_nvlink_entries
(
    nvswitch_device         *device,
    NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
)
{
    NvU32                       tblPtr;
    NvU8                        version;
    NvU8                        size;
    NV_STATUS                   status = NV_ERR_GENERIC;
    NVLINK_CONFIG_DATA_HEADER   header;
    NvU32                       base_entry_index;
    NvU32                       expected_base_entry_count;

    // Locate the NvLink config table within the VBIOS image
    tblPtr = _nvswitch_get_nvlink_config_address(device, bios_config);
    if (!tblPtr)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: No NvLink Config table set\n",
            __FUNCTION__);
        goto vbios_fetch_nvlink_entries_done;
    }

    // Read the table version number
    version = _nvswitch_vbios_read8(device, tblPtr);
    switch (version)
    {
        case NVLINK_CONFIG_DATA_HEADER_VER_20:
        case NVLINK_CONFIG_DATA_HEADER_VER_30:
            // Both versions use the ver_20 header layout; only the
            // per-link entry size differs (handled below).
            size = _nvswitch_vbios_read8(device, tblPtr + 1);
            if (size == NVLINK_CONFIG_DATA_HEADER_20_SIZE)
            {
                // Grab Nvlink Config Data Header
                status = _nvswitch_vbios_read_structure(device, &header.ver_20, tblPtr, (NvU32 *) 0, NVLINK_CONFIG_DATA_HEADER_20_FMT);

                if (status != NV_OK)
                {
                    NVSWITCH_PRINT(device, ERROR,
                        "%s: Error on reading the nvlink config header\n",
                        __FUNCTION__);
                }
            }
            break;
        default:
            NVSWITCH_PRINT(device, ERROR,
                "%s: Invalid version 0x%x\n",
                __FUNCTION__, version);
    }
    // status is still NV_ERR_GENERIC here if the version/size was rejected
    if (status != NV_OK)
    {
        goto vbios_fetch_nvlink_entries_done;
    }

    NVSWITCH_PRINT(device, NOISY,
        "<<<---- NvLink Header ---->>>\n\n");
    NVSWITCH_PRINT(device, NOISY,
        "Version \t\t 0x%x\n", header.ver_20.Version);
    NVSWITCH_PRINT(device, NOISY,
        "Header Size \t0x%x\n", header.ver_20.HeaderSize);
    NVSWITCH_PRINT(device, NOISY,
        "Base Entry Size \t0x%x\n", header.ver_20.BaseEntrySize);
    NVSWITCH_PRINT(device, NOISY,
        "Base Entry count \t0x%x\n", header.ver_20.BaseEntryCount);
    NVSWITCH_PRINT(device, NOISY,
        "Link Entry Size \t0x%x\n", header.ver_20.LinkEntrySize);
    NVSWITCH_PRINT(device, NOISY,
        "Link Entry Count \t0x%x\n", header.ver_20.LinkEntryCount);
    NVSWITCH_PRINT(device, NOISY,
        "Reserved \t0x%x\n", header.ver_20.Reserved);
    NVSWITCH_PRINT(device, NOISY,
        "<<<---- NvLink Header ---->>>\n");
    // Remember the table version; it selects the link-entry stride below
    // and in the per-entry reader.
    if (header.ver_20.Version == NVLINK_CONFIG_DATA_HEADER_VER_20)
    {
         device->bIsNvlinkVbiosTableVersion2 = NV_TRUE;
    }
    // Clamp the base entry count to what our storage can hold
    expected_base_entry_count = header.ver_20.BaseEntryCount;
    if (expected_base_entry_count > NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY)
    {
        NVSWITCH_PRINT(device, WARN,
            "%s: Greater than expected base entry count 0x%x - Restricting to count 0x%x\n",
            __FUNCTION__, expected_base_entry_count, NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY);
        expected_base_entry_count = NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY;
    }

    // Advance past the header to the first base entry
    tblPtr += header.ver_20.HeaderSize;
    for (base_entry_index = 0; base_entry_index < expected_base_entry_count; base_entry_index++)
    {
        // Clamp link entries per base entry to this device's link count
        NvU32 expected_link_entriesCount = header.ver_20.LinkEntryCount;
        if (expected_link_entriesCount > NVSWITCH_LINK_COUNT(device))
        {
            NVSWITCH_PRINT(device, WARN,
                "%s: Greater than expected link count 0x%x - Restricting to count 0x%x\n",
                __FUNCTION__, expected_link_entriesCount, NVSWITCH_LINK_COUNT(device));
            expected_link_entriesCount = NVSWITCH_LINK_COUNT(device);
        }

        // Grab Nvlink Config Data Base Entry
        // NOTE(review): the return status of this read is ignored — a failed
        // base-entry read silently proceeds with stale data; confirm intended.
        _nvswitch_read_vbios_link_base_entry(device, tblPtr, &bios_config->link_vbios_base_entry[base_entry_index]);
        tblPtr += header.ver_20.BaseEntrySize;
        device->hal.nvswitch_read_vbios_link_entries(device,
                                          tblPtr,
                                          expected_link_entriesCount,
                                          bios_config->link_vbios_entry[base_entry_index],
                                          &bios_config->identified_Link_entries[base_entry_index]);

        // Step over the link entries just consumed. The stride divides by
        // sizeof(NvU32) — tblPtr appears to advance in 32-bit-word units
        // here; TODO confirm against the read helpers' addressing.
        if (device->bIsNvlinkVbiosTableVersion2)
        {
            tblPtr += (expected_link_entriesCount * (sizeof(NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_20)/sizeof(NvU32)));
        }
        else
        {
            tblPtr += (expected_link_entriesCount * (sizeof(NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_30)/sizeof(NvU32)));
        }
    }
vbios_fetch_nvlink_entries_done:
    return status;
}
981 
982 NV_STATUS
983 _nvswitch_vbios_assign_base_entry
984 (
985     nvswitch_device         *device,
986     NVSWITCH_BIOS_NVLINK_CONFIG    *bios_config
987 )
988 {
989     NvU32 physical_id;
990     NvU32 entry_index;
991 
992     physical_id = nvswitch_read_physical_id(device);
993 
994     for (entry_index = 0; entry_index < NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY; entry_index++)
995     {
996         if (physical_id == bios_config->link_vbios_base_entry[entry_index].positionId)
997         {
998             bios_config->link_base_entry_assigned = entry_index;
999             return NV_OK;
1000         }
1001     }
1002 
1003     // TODO: Bug 3507948
1004     NVSWITCH_PRINT(device, ERROR,
1005             "%s: Error on assigning base entry. Setting base entry index = 0\n",
1006             __FUNCTION__);
1007     bios_config->link_base_entry_assigned = 0;
1008 
1009     return NV_OK;
1010 }
1011 
1012 NV_STATUS
1013 _nvswitch_setup_link_vbios_overrides
1014 (
1015     nvswitch_device *device,
1016     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
1017 )
1018 {
1019     NV_STATUS    status         = NV_OK;
1020 
1021     if (bios_config == NULL)
1022     {
1023         NVSWITCH_PRINT(device, ERROR,
1024                 "%s: BIOS config override not supported\n",
1025                 __FUNCTION__);
1026          return -NVL_ERR_NOT_SUPPORTED;
1027     }
1028 
1029     bios_config->vbios_disabled_link_mask = 0;
1030 
1031     bios_config->bit_address                 = 0;
1032     bios_config->pci_image_address           = 0;
1033     bios_config->nvlink_config_table_address = 0;
1034 
1035     if ((device->biosImage.size == 0) || (device->biosImage.pImage == NULL))
1036     {
1037         NVSWITCH_PRINT(device, ERROR,
1038                 "%s: VBIOS not exist size:0x%x\n",
1039                 __FUNCTION__, device->biosImage.size);
1040          return -NVL_ERR_NOT_SUPPORTED;
1041     }
1042 
1043     //
1044     // Locate the PCI ROM Image
1045     //
1046     if (_nvswitch_vbios_identify_pci_image_loc(device, bios_config)  != NV_OK)
1047     {
1048         NVSWITCH_PRINT(device, ERROR,
1049             "%s: Error on identifying pci image loc\n",
1050             __FUNCTION__);
1051         status = NV_ERR_GENERIC;
1052         goto setup_link_vbios_overrides_done;
1053     }
1054 
1055     //
1056     // Locate and fetch BIT offset
1057     //
1058     if (_nvswitch_vbios_update_bit_Offset(device, bios_config) != NV_OK)
1059     {
1060         NVSWITCH_PRINT(device, ERROR,
1061             "%s: Error on identifying pci image loc\n",
1062             __FUNCTION__);
1063         status = NV_ERR_GENERIC;
1064         goto setup_link_vbios_overrides_done;
1065     }
1066 
1067     //
1068     // Fetch NvLink Entries
1069     //
1070     if (_nvswitch_vbios_fetch_nvlink_entries(device, bios_config) != NV_OK)
1071     {
1072         NVSWITCH_PRINT(device, ERROR,
1073             "%s: Error on fetching nvlink entries\n",
1074             __FUNCTION__);
1075         status = NV_ERR_GENERIC;
1076         goto setup_link_vbios_overrides_done;
1077     }
1078 
1079     //
1080     // Assign Base Entry for this device
1081     //
1082     if (_nvswitch_vbios_assign_base_entry(device, bios_config) != NV_OK)
1083     {
1084         NVSWITCH_PRINT(device, ERROR,
1085             "%s: Error on assigning base entry\n",
1086             __FUNCTION__);
1087         status = NV_ERR_GENERIC;
1088         goto setup_link_vbios_overrides_done;
1089     }
1090 
1091 setup_link_vbios_overrides_done:
1092     if (status != NV_OK)
1093     {
1094         bios_config->bit_address                = 0;
1095         bios_config->pci_image_address          = 0;
1096         bios_config->nvlink_config_table_address =0;
1097     }
1098     return status;
1099 }
1100 
1101 /*
1102  * @Brief : Setting up system registers after device initialization
1103  *
1104  * @Description :
1105  *
1106  * @param[in] device        a reference to the device to initialize
1107  */
1108 NvlStatus
1109 nvswitch_setup_system_registers_lr10
1110 (
1111     nvswitch_device *device
1112 )
1113 {
1114     nvlink_link *link;
1115     NvU8 i;
1116     NvU64 enabledLinkMask;
1117 
1118     enabledLinkMask = nvswitch_get_enabled_link_mask(device);
1119 
1120     FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask)
1121     {
1122         NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device));
1123 
1124         link = nvswitch_get_link(device, i);
1125 
1126         if ((link == NULL) ||
1127             !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
1128             (i >= NVSWITCH_NVLINK_MAX_LINKS))
1129         {
1130             continue;
1131         }
1132 
1133         nvswitch_setup_link_system_registers(device, link);
1134         nvswitch_load_link_disable_settings(device, link);
1135     }
1136     FOR_EACH_INDEX_IN_MASK_END;
1137 
1138     return NVL_SUCCESS;
1139 }
1140 
1141 NvlStatus
1142 nvswitch_deassert_link_reset_lr10
1143 (
1144     nvswitch_device *device,
1145     nvlink_link     *link
1146 )
1147 {
1148     NvU64 mode;
1149     NvlStatus status = NVL_SUCCESS;
1150 
1151     status = device->hal.nvswitch_corelib_get_dl_link_mode(link, &mode);
1152 
1153     if (status != NVL_SUCCESS)
1154     {
1155         NVSWITCH_PRINT(device, ERROR,
1156                 "%s:DL link mode failed on link %d\n",
1157                 __FUNCTION__, link->linkNumber);
1158         return status;
1159     }
1160 
1161     // Check if the link is RESET
1162     if (mode != NVLINK_LINKSTATE_RESET)
1163     {
1164         return NVL_SUCCESS;
1165     }
1166 
1167     // Send INITPHASE1 to bring link out of reset
1168     status = link->link_handlers->set_dl_link_mode(link,
1169                                         NVLINK_LINKSTATE_INITPHASE1,
1170                                         NVLINK_STATE_CHANGE_ASYNC);
1171 
1172     if (status != NVL_SUCCESS)
1173     {
1174         NVSWITCH_PRINT(device, ERROR,
1175                 "%s: INITPHASE1 failed on link %d\n",
1176                 __FUNCTION__, link->linkNumber);
1177     }
1178 
1179     return status;
1180 }
1181 
// Return the number of virtual channels on LR10 (fixed per chip).
static NvU32
_nvswitch_get_num_vcs_lr10
(
    nvswitch_device *device
)
{
    return NVSWITCH_NUM_VCS_LR10;
}
1190 
1191 void
1192 nvswitch_determine_platform_lr10
1193 (
1194     nvswitch_device *device
1195 )
1196 {
1197     NvU32 value;
1198 
1199     //
1200     // Determine which model we are using SMC_BOOT_2 and OS query
1201     //
1202     value = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_2);
1203     device->is_emulation = FLD_TEST_DRF(_PSMC, _BOOT_2, _EMULATION, _YES, value);
1204 
1205     if (!IS_EMULATION(device))
1206     {
1207         // If we are not on fmodel, we must be on RTL sim or silicon
1208         if (FLD_TEST_DRF(_PSMC, _BOOT_2, _FMODEL, _YES, value))
1209         {
1210             device->is_fmodel = NV_TRUE;
1211         }
1212         else
1213         {
1214             device->is_rtlsim = NV_TRUE;
1215 
1216             // Let OS code finalize RTL sim vs silicon setting
1217             nvswitch_os_override_platform(device->os_handle, &device->is_rtlsim);
1218         }
1219     }
1220 
1221 #if defined(NVLINK_PRINT_ENABLED)
1222     {
1223         const char *build;
1224         const char *mode;
1225 
1226         build = "HW";
1227         if (IS_FMODEL(device))
1228             mode = "fmodel";
1229         else if (IS_RTLSIM(device))
1230             mode = "rtlsim";
1231         else if (IS_EMULATION(device))
1232             mode = "emulation";
1233         else
1234             mode = "silicon";
1235 
1236         NVSWITCH_PRINT(device, SETUP,
1237             "%s: build: %s platform: %s\n",
1238              __FUNCTION__, build, mode);
1239     }
1240 #endif // NVLINK_PRINT_ENABLED
1241 }
1242 
1243 static void
1244 _nvswitch_portstat_reset_latency_counters
1245 (
1246     nvswitch_device *device
1247 )
1248 {
1249     // Set SNAPONDEMAND from 0->1 to reset the counters
1250     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL,
1251         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
1252         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _ENABLE));
1253 
1254     // Set SNAPONDEMAND back to 0.
1255     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL,
1256         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
1257         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE));
1258 }
1259 
1260 //
1261 // Data collector which runs on a background thread, collecting latency stats.
1262 //
1263 // The latency counters have a maximum window period of 3.299 seconds
1264 // (2^32 clk cycles). The counters reset after this period. So SW snaps
1265 // the bins and records latencies every 3 seconds. Setting SNAPONDEMAND from 0->1
// snaps the latency counters and updates them to PRI registers for
1267 // the SW to read. It then resets the counters to start collecting fresh latencies.
1268 //
1269 
1270 void
1271 nvswitch_internal_latency_bin_log_lr10
1272 (
1273     nvswitch_device *device
1274 )
1275 {
1276     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
1277     NvU32 idx_nport;
1278     NvU32 idx_vc;
1279     NvBool vc_valid;
1280     NvU32 latency;
1281     NvU64 time_nsec;
1282     NvU32 link_type;    // Access or trunk link
1283     NvU64 last_visited_time_nsec;
1284 
1285     if (chip_device->latency_stats == NULL)
1286     {
1287         // Latency stat buffers not allocated yet
1288         return;
1289     }
1290 
1291     time_nsec = nvswitch_os_get_platform_time();
1292     last_visited_time_nsec = chip_device->latency_stats->last_visited_time_nsec;
1293 
1294     // Update last visited time
1295     chip_device->latency_stats->last_visited_time_nsec = time_nsec;
1296 
1297     // Compare time stamp and reset the counters if the snap is missed
1298     if (!IS_RTLSIM(device) || !IS_FMODEL(device))
1299     {
1300         if ((last_visited_time_nsec != 0) &&
1301             ((time_nsec - last_visited_time_nsec) > 3 * NVSWITCH_INTERVAL_1SEC_IN_NS))
1302         {
1303             NVSWITCH_PRINT(device, ERROR,
1304                 "Latency metrics recording interval missed.  Resetting counters.\n");
1305             _nvswitch_portstat_reset_latency_counters(device);
1306             return;
1307         }
1308     }
1309 
1310     for (idx_nport=0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++)
1311     {
1312         if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, idx_nport))
1313         {
1314             continue;
1315         }
1316 
1317         // Setting SNAPONDEMAND from 0->1 snaps the latencies and resets the counters
1318         NVSWITCH_LINK_WR32_LR10(device, idx_nport, NPORT, _NPORT, _PORTSTAT_SNAP_CONTROL,
1319             DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
1320             DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _ENABLE));
1321 
1322         //
1323         // TODO: Check _STARTCOUNTER and don't log if counter not enabled.
1324         // Currently all counters are always enabled
1325         //
1326 
1327         link_type = NVSWITCH_LINK_RD32_LR10(device, idx_nport, NPORT, _NPORT, _CTRL);
1328         for (idx_vc = 0; idx_vc < NVSWITCH_NUM_VCS_LR10; idx_vc++)
1329         {
1330             vc_valid = NV_FALSE;
1331 
1332             // VC's CREQ0(0) and RSP0(5) are relevant on access links.
1333             if (FLD_TEST_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _ACCESSLINK, link_type) &&
1334                 ((idx_vc == NV_NPORT_VC_MAPPING_CREQ0) ||
1335                 (idx_vc == NV_NPORT_VC_MAPPING_RSP0)))
1336             {
1337                 vc_valid = NV_TRUE;
1338             }
1339 
1340             // VC's CREQ0(0), RSP0(5), CREQ1(6) and RSP1(7) are relevant on trunk links.
1341             if (FLD_TEST_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _TRUNKLINK, link_type) &&
1342                 ((idx_vc == NV_NPORT_VC_MAPPING_CREQ0)  ||
1343                  (idx_vc == NV_NPORT_VC_MAPPING_RSP0)   ||
1344                  (idx_vc == NV_NPORT_VC_MAPPING_CREQ1)  ||
1345                  (idx_vc == NV_NPORT_VC_MAPPING_RSP1)))
1346             {
1347                 vc_valid = NV_TRUE;
1348             }
1349 
1350             // If the VC is not being used, skip reading it
1351             if (!vc_valid)
1352             {
1353                 continue;
1354             }
1355 
1356             latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _LOW, idx_vc);
1357             chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].low += latency;
1358 
1359             latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _MEDIUM, idx_vc);
1360             chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].medium += latency;
1361 
1362             latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _HIGH, idx_vc);
1363             chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].high += latency;
1364 
1365             latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _PANIC, idx_vc);
1366             chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].panic += latency;
1367 
1368             latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _PACKET, _COUNT, idx_vc);
1369             chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].count += latency;
1370 
1371             // Note the time of this snap
1372             chip_device->latency_stats->latency[idx_vc].last_read_time_nsec = time_nsec;
1373             chip_device->latency_stats->latency[idx_vc].count++;
1374         }
1375 
1376         // Disable SNAPONDEMAND after fetching the latencies
1377         NVSWITCH_LINK_WR32_LR10(device, idx_nport, NPORT, _NPORT, _PORTSTAT_SNAP_CONTROL,
1378             DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
1379             DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE));
1380     }
1381 }
1382 
// Intentionally a no-op on LR10: there is no ECC writeback work to do here.
// Kept so the HAL dispatch table has a valid entry for this chip.
void
nvswitch_ecc_writeback_task_lr10
(
    nvswitch_device *device
)
{
}
1390 
/*
 * @brief Write 64-bit entries into the ROUTE ganged link table.
 *
 * Programs the table index register with AUTO_INCR set, then streams
 * each entry's low and high 32-bit halves through the data register.
 *
 * @param[in] device             the device to program (broadcast to all NPORTs)
 * @param[in] firstIndex         starting table index
 * @param[in] ganged_link_table  array of 64-bit table entries
 * @param[in] numEntries         number of entries to write
 */
void
nvswitch_set_ganged_link_table_lr10
(
    nvswitch_device *device,
    NvU32            firstIndex,
    NvU64           *ganged_link_table,
    NvU32            numEntries
)
{
    NvU32 i;

    // Set the starting index; AUTO_INCR advances the RAM address as
    // data words are written.
    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _REG_TABLE_ADDRESS,
        DRF_NUM(_ROUTE, _REG_TABLE_ADDRESS, _INDEX, firstIndex) |
        DRF_NUM(_ROUTE, _REG_TABLE_ADDRESS, _AUTO_INCR, 1));

    for (i = 0; i < numEntries; i++)
    {
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _REG_TABLE_DATA0,
            NvU64_LO32(ganged_link_table[i]));

        // NOTE(review): the high half is also written to _REG_TABLE_DATA0.
        // If the hardware exposes a separate DATA1 register for the upper
        // 32 bits, this looks like it should target it — confirm against
        // the ROUTE register manual before changing.
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _REG_TABLE_DATA0,
            NvU64_HI32(ganged_link_table[i]));
    }
}
1415 
// Build and load the ganged link routing table with a uniform spray
// pattern, then cache the table in chip_device for later reference.
// Returns NVL_SUCCESS, or -NVL_NO_MEM if the table allocation fails.
static NvlStatus
_nvswitch_init_ganged_link_routing
(
    nvswitch_device *device
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32        gang_index, gang_size;
    NvU64        gang_entry;
    NvU32        block_index;
    NvU32        block_count = 16;
    NvU32        glt_entries = 16;
    NvU32        glt_size = ROUTE_GANG_TABLE_SIZE / 2;
    NvU64        *ganged_link_table = NULL;
    NvU32        block_size = ROUTE_GANG_TABLE_SIZE / block_count;
    NvU32        table_index = 0;
    NvU32        i;

    //
    // Refer to switch IAS 11.2 Figure 82. Limerock Ganged RAM Table Format
    //
    // The ganged link routing table is composed of 512 entries divided into 16 sections.
    // Each section specifies how requests should be routed through the ganged links.
    // Each 32-bit entry is composed of eight 4-bit fields specifying the set of of links
    // to distribute through.  More complex spray patterns could be constructed, but for
    // now initialize it with a uniform distribution pattern.
    //
    // The ganged link routing table will be loaded with following values:
    // Typically the first section would be filled with (0,1,2,3,4,5,6,7), (8,9,10,11,12,13,14,15),...
    // Typically the second section would be filled with (0,0,0,0,0,0,0,0), (0,0,0,0,0,0,0,0),...
    // Typically the third section would be filled with (0,1,0,1,0,1,0,1), (0,1,0,1,0,1,0,1),...
    // Typically the fourth section would be filled with (0,1,2,0,1,2,0,1), (2,0,1,2,0,1,2,0),...
    //  :
    // The last section would typically be filled with (0,1,2,3,4,5,6,7), (8,9,10,11,12,13,14,0),...
    //
    // Refer table 20: Definition of size bits used with Ganged Link Number Table.
    // Note that section 0 corresponds with 16 ganged links.  Section N corresponds with
    // N ganged links.
    //

    //Alloc memory for Ganged Link Table
    ganged_link_table = nvswitch_os_malloc(glt_size * sizeof(gang_entry));
    if (ganged_link_table == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Failed to allocate memory for GLT!!\n");
        return -NVL_NO_MEM;
    }

    for (block_index = 0; block_index < block_count; block_index++)
    {
        // Section 0 sprays over 16 links; section N sprays over N links.
        gang_size = ((block_index==0) ? 16 : block_index);

        // Each section holds block_size/2 64-bit entries.
        for (gang_index = 0; gang_index < block_size/2; gang_index++)
        {
            gang_entry = 0;
            NVSWITCH_ASSERT(table_index < glt_size);

            // Pack glt_entries 4-bit link selectors, cycling through the
            // gang links in round-robin order.
            for (i = 0; i < glt_entries; i++)
            {
                gang_entry |=
                    DRF_NUM64(_ROUTE, _REG_TABLE_DATA0, _GLX(i), (16 * gang_index + i) % gang_size);
            }

            ganged_link_table[table_index++] = gang_entry;
        }
    }

    // Load the table into hardware starting at index 0
    nvswitch_set_ganged_link_table_lr10(device, 0, ganged_link_table, glt_size);

    // Keep a copy for later inspection; freed with the chip device.
    chip_device->ganged_link_table = ganged_link_table;

    return NVL_SUCCESS;
}
1490 
// Bring the IP wrapper modules out of reset and reconcile the NVLIPT and
// NPG enable masks with what the platform (bare metal vs. VM/hypervisor)
// actually permits, invalidating engines and links that are disallowed.
static NvlStatus
nvswitch_initialize_ip_wrappers_lr10
(
    nvswitch_device *device
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32 engine_enable_mask;
    NvU32 engine_disable_mask;
    NvU32 i, j;
    NvU32 idx_link;

    //
    // Now that software knows the devices and addresses, it must take all
    // the wrapper modules out of reset.  It does this by writing to the
    // PMC module enable registers.
    //

// Temporary - bug 2069764
//    NVSWITCH_REG_WR32(device, _PSMC, _ENABLE,
//        DRF_DEF(_PSMC, _ENABLE, _SAW, _ENABLE) |
//        DRF_DEF(_PSMC, _ENABLE, _PRIV_RING, _ENABLE) |
//        DRF_DEF(_PSMC, _ENABLE, _PERFMON, _ENABLE));

    NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE,
        DRF_DEF(_NVLSAW_NVSPMC, _ENABLE, _NXBAR, _ENABLE));

    //
    // At this point the list of discovered devices has been cross-referenced
    // with the ROM configuration, platform configuration, and regkey override.
    // The NVLIPT & NPORT enable filtering done here further updates the MMIO
    // information based on KVM.
    //

    // Enable the NVLIPT units that have been discovered
    engine_enable_mask = 0;
    for (i = 0; i < NVSWITCH_ENG_COUNT(device, NVLW, ); i++)
    {
        if (NVSWITCH_ENG_IS_VALID(device, NVLW, i))
        {
            engine_enable_mask |= NVBIT(i);
        }
    }
    NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NVLIPT, engine_enable_mask);

    //
    // In bare metal we write ENABLE_NVLIPT to enable the units that aren't
    // disabled by ROM configuration, platform configuration, or regkey override.
    // If we are running inside a VM, the hypervisor has already set ENABLE_NVLIPT
    // and write protected it.  Reading ENABLE_NVLIPT tells us which units we
    // are allowed to use inside this VM.
    //
    engine_disable_mask = ~NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NVLIPT);
    if (engine_enable_mask != ~engine_disable_mask)
    {
        NVSWITCH_PRINT(device, WARN,
            "NV_NVLSAW_NVSPMC_ENABLE_NVLIPT mismatch: wrote 0x%x, read 0x%x\n",
            engine_enable_mask,
            ~engine_disable_mask);
        NVSWITCH_PRINT(device, WARN,
            "Ignoring NV_NVLSAW_NVSPMC_ENABLE_NVLIPT readback until supported on fmodel\n");
        engine_disable_mask = ~engine_enable_mask;
    }
    // Trim the disable mask to only the NVLW engines that exist
    engine_disable_mask &= NVBIT(NVSWITCH_ENG_COUNT(device, NVLW, )) - 1;
    // Invalidate every disallowed NVLW and all the links it carries
    FOR_EACH_INDEX_IN_MASK(32, i, engine_disable_mask)
    {
        chip_device->engNVLW[i].valid = NV_FALSE;
        for (j = 0; j < NVSWITCH_LINKS_PER_NVLW; j++)
        {
            idx_link = i * NVSWITCH_LINKS_PER_NVLW + j;
            if (idx_link < NVSWITCH_LINK_COUNT(device))
            {
                device->link[idx_link].valid = NV_FALSE;
                //
                // TODO: This invalidate used to also invalidate all the
                // associated NVLW engFOO units. This is probably not necessary
                // but code that bypasses the link valid check might touch the
                // underlying units when they are not supposed to.
                //
            }
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    // Enable the NPORT units that have been discovered
    engine_enable_mask = 0;
    for (i = 0; i < NVSWITCH_ENG_COUNT(device, NPG, ); i++)
    {
        if (NVSWITCH_ENG_IS_VALID(device, NPG, i))
        {
            engine_enable_mask |= NVBIT(i);
        }
    }
    NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NPG, engine_enable_mask);

    //
    // In bare metal we write ENABLE_NPG to enable the units that aren't
    // disabled by ROM configuration, platform configuration, or regkey override.
    // If we are running inside a VM, the hypervisor has already set ENABLE_NPG
    // and write protected it.  Reading ENABLE_NPG tells us which units we
    // are allowed to use inside this VM.
    //
    engine_disable_mask = ~NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NPG);
    if (engine_enable_mask != ~engine_disable_mask)
    {
        NVSWITCH_PRINT(device, WARN,
            "NV_NVLSAW_NVSPMC_ENABLE_NPG mismatch: wrote 0x%x, read 0x%x\n",
            engine_enable_mask,
            ~engine_disable_mask);
        NVSWITCH_PRINT(device, WARN,
            "Ignoring NV_NVLSAW_NVSPMC_ENABLE_NPG readback until supported on fmodel\n");
        engine_disable_mask = ~engine_enable_mask;
    }
    // Trim the disable mask to only the NPG engines that exist
    engine_disable_mask &= NVBIT(NVSWITCH_ENG_COUNT(device, NPG, )) - 1;
    // Invalidate every disallowed NPG and all the links it carries
    FOR_EACH_INDEX_IN_MASK(32, i, engine_disable_mask)
    {
        chip_device->engNPG[i].valid = NV_FALSE;
        for (j = 0; j < NVSWITCH_LINKS_PER_NPG; j++)
        {
            idx_link = i * NVSWITCH_LINKS_PER_NPG + j;

            if (idx_link < NVSWITCH_LINK_COUNT(device))
            {
                device->link[idx_link].valid = NV_FALSE;
                //
                // TODO: This invalidate used to also invalidate all the
                // associated NPG engFOO units. This is probably not necessary
                // but code that bypasses the link valid check might touch the
                // underlying units when they are not supposed to.
                //
            }
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}
1628 
1629 //
1630 // Bring units out of warm reset on boot.  Used by driver load.
1631 //
1632 void
1633 nvswitch_init_warm_reset_lr10
1634 (
1635     nvswitch_device *device
1636 )
1637 {
1638     NvU32 idx_npg;
1639     NvU32 idx_nport;
1640     NvU32 nport_mask;
1641     NvU32 nport_disable = 0;
1642 
1643 #if defined(NV_NPG_WARMRESET_NPORTDISABLE)
1644     nport_disable = DRF_NUM(_NPG, _WARMRESET, _NPORTDISABLE, ~nport_mask);
1645 #endif
1646 
1647     //
1648     // Walk the NPGs and build the mask of extant NPORTs
1649     //
1650     for (idx_npg = 0; idx_npg < NVSWITCH_ENG_COUNT(device, NPG, ); idx_npg++)
1651     {
1652         if (NVSWITCH_ENG_IS_VALID(device, NPG, idx_npg))
1653         {
1654             nport_mask = 0;
1655             for (idx_nport = 0; idx_nport < NVSWITCH_NPORT_PER_NPG; idx_nport++)
1656             {
1657                 nport_mask |=
1658                     (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_npg*NVSWITCH_NPORT_PER_NPG + idx_nport) ?
1659                     NVBIT(idx_nport) : 0x0);
1660             }
1661 
1662             NVSWITCH_NPG_WR32_LR10(device, idx_npg,
1663                 _NPG, _WARMRESET,
1664                 nport_disable |
1665                 DRF_NUM(_NPG, _WARMRESET, _NPORTWARMRESET, nport_mask));
1666         }
1667     }
1668 }
1669 
1670 /*
1671  * CTRL_NVSWITCH_SET_REMAP_POLICY
1672  */
1673 
1674 NvlStatus
1675 nvswitch_get_remap_table_selector_lr10
1676 (
1677     nvswitch_device *device,
1678     NVSWITCH_TABLE_SELECT_REMAP table_selector,
1679     NvU32 *remap_ram_sel
1680 )
1681 {
1682     NvU32 ram_sel = 0;
1683 
1684     switch (table_selector)
1685     {
1686         case NVSWITCH_TABLE_SELECT_REMAP_PRIMARY:
1687             ram_sel = NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM;
1688             break;
1689         default:
1690             // Unsupported remap table selector
1691             return -NVL_ERR_NOT_SUPPORTED;
1692             break;
1693     }
1694 
1695     if (remap_ram_sel)
1696     {
1697         *remap_ram_sel = ram_sel;
1698     }
1699 
1700     return NVL_SUCCESS;
1701 }
1702 
1703 NvU32
1704 nvswitch_get_ingress_ram_size_lr10
1705 (
1706     nvswitch_device *device,
1707     NvU32 ingress_ram_selector      // NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECT*
1708 )
1709 {
1710     NvU32 ram_size = 0;
1711 
1712     switch (ingress_ram_selector)
1713     {
1714         case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM:
1715             ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_REMAPTAB_DEPTH + 1;
1716             break;
1717         case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM:
1718             ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RID_TAB_DEPTH + 1;
1719             break;
1720         case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM:
1721             ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RLAN_TAB_DEPTH + 1;
1722             break;
1723         default:
1724             // Unsupported ingress RAM selector
1725             break;
1726     }
1727 
1728     return ram_size;
1729 }
1730 
// Program numEntries remap policy entries into the ingress REMAP RAM of
// the given port, starting at firstIndex.
//
// Register ordering matters: DATA1..DATA4 are staged first and the DATA0
// write commits the entry and auto-increments the RAM address.
static void
_nvswitch_set_remap_policy_lr10
(
    nvswitch_device *device,
    NvU32 portNum,
    NvU32 firstIndex,
    NvU32 numEntries,
    NVSWITCH_REMAP_POLICY_ENTRY *remap_policy
)
{
    NvU32 i;
    NvU32 remap_address;
    NvU32 address_offset;
    NvU32 address_base;
    NvU32 address_limit;

    // Select the remap policy RAM at firstIndex with auto-increment on
    NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    for (i = 0; i < numEntries; i++)
    {
        // Set each field if enabled, else set it to 0.
        // Extract the LR10-relevant physical-address bits from each 64-bit field.
        remap_address = DRF_VAL64(_INGRESS, _REMAP, _ADDR_PHYS_LR10, remap_policy[i].address);
        address_offset = DRF_VAL64(_INGRESS, _REMAP, _ADR_OFFSET_PHYS_LR10, remap_policy[i].addressOffset);
        address_base = DRF_VAL64(_INGRESS, _REMAP, _ADR_BASE_PHYS_LR10, remap_policy[i].addressBase);
        address_limit = DRF_VAL64(_INGRESS, _REMAP, _ADR_LIMIT_PHYS_LR10, remap_policy[i].addressLimit);

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA1,
            DRF_NUM(_INGRESS, _REMAPTABDATA1, _REQCTXT_MSK, remap_policy[i].reqCtxMask) |
            DRF_NUM(_INGRESS, _REMAPTABDATA1, _REQCTXT_CHK, remap_policy[i].reqCtxChk));
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA2,
            DRF_NUM(_INGRESS, _REMAPTABDATA2, _REQCTXT_REP, remap_policy[i].reqCtxRep) |
            DRF_NUM(_INGRESS, _REMAPTABDATA2, _ADR_OFFSET, address_offset));
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA3,
            DRF_NUM(_INGRESS, _REMAPTABDATA3, _ADR_BASE, address_base) |
            DRF_NUM(_INGRESS, _REMAPTABDATA3, _ADR_LIMIT, address_limit));
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA4,
            DRF_NUM(_INGRESS, _REMAPTABDATA4, _TGTID, remap_policy[i].targetId) |
            DRF_NUM(_INGRESS, _REMAPTABDATA4, _RFUNC, remap_policy[i].flags));

        // Write last and auto-increment
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA0,
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _RMAP_ADDR, remap_address) |
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _IRL_SEL, remap_policy[i].irlSelect) |
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _ACLVALID, remap_policy[i].entryValid));
    }
}
1780 
/*
 * CTRL_NVSWITCH_SET_REMAP_POLICY
 *
 * Validates each requested remap policy entry and, only if every entry
 * passes, programs entries [firstIndex, firstIndex + numEntries) of the
 * NPORT's ingress remap policy RAM via _nvswitch_set_remap_policy_lr10().
 *
 * Returns NVL_SUCCESS on success, -NVL_ERR_NOT_SUPPORTED for any table
 * other than the primary remap table, or -NVL_BAD_ARGS on any
 * range/field-width validation failure.
 */
NvlStatus
nvswitch_ctrl_set_remap_policy_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_REMAP_POLICY *p
)
{
    NvU32 i;
    NvU32 rfunc;
    NvU32 ram_size;
    NvlStatus retval = NVL_SUCCESS;

    // The target port must have a valid NPORT engine instance.
    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "NPORT port #%d not valid\n",
            p->portNum);
        return -NVL_BAD_ARGS;
    }

    // Only the primary remap table is supported on LR10.
    if (p->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Remap table #%d not supported\n",
            p->tableSelect);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    // The requested range must lie entirely within the remap policy RAM.
    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM);
    if ((p->firstIndex >= ram_size) ||
        (p->numEntries > NVSWITCH_REMAP_POLICY_ENTRIES_MAX) ||
        (p->firstIndex + p->numEntries > ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "remapPolicy[%d..%d] overflows range %d..%d or size %d.\n",
            p->firstIndex, p->firstIndex + p->numEntries - 1,
            0, ram_size - 1,
            NVSWITCH_REMAP_POLICY_ENTRIES_MAX);
        return -NVL_BAD_ARGS;
    }

    // Validate every entry before touching any hardware state.
    for (i = 0; i < p->numEntries; i++)
    {
        // targetId must fit the REMAPTABDATA4 TGTID field.
        if (p->remapPolicy[i].targetId &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA4_TGTID))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].targetId 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].targetId,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA4_TGTID));
            return -NVL_BAD_ARGS;
        }

        // irlSelect must fit the REMAPTABDATA0 IRL_SEL field.
        if (p->remapPolicy[i].irlSelect &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA0_IRL_SEL))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].irlSelect 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].irlSelect,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA0_IRL_SEL));
            return -NVL_BAD_ARGS;
        }

        // Only the remap-function flags implemented on LR10 may be set.
        rfunc = p->remapPolicy[i].flags &
            (
                NVSWITCH_REMAP_POLICY_FLAGS_REMAP_ADDR |
                NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_CHECK |
                NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_REPLACE |
                NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE |
                NVSWITCH_REMAP_POLICY_FLAGS_ADR_OFFSET
            );
        if (rfunc != p->remapPolicy[i].flags)
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].flags 0x%x has undefined flags (0x%x)\n",
                i, p->remapPolicy[i].flags,
                p->remapPolicy[i].flags ^ rfunc);
            return -NVL_BAD_ARGS;
        }

        // Validate that only bits 46:36 are used
        if (p->remapPolicy[i].address &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADDR_PHYS_LR10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].address 0x%llx & ~0x%llx != 0\n",
                i, p->remapPolicy[i].address,
                DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADDR_PHYS_LR10));
            return -NVL_BAD_ARGS;
        }

        // reqCtxMask must fit the REMAPTABDATA1 REQCTXT_MSK field.
        if (p->remapPolicy[i].reqCtxMask &
           ~DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].reqCtxMask 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].reqCtxMask,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK));
            return -NVL_BAD_ARGS;
        }

        // reqCtxChk must fit the REMAPTABDATA1 REQCTXT_CHK field.
        if (p->remapPolicy[i].reqCtxChk &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].reqCtxChk 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].reqCtxChk,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK));
            return -NVL_BAD_ARGS;
        }

        // reqCtxRep must fit the REMAPTABDATA2 REQCTXT_REP field.
        if (p->remapPolicy[i].reqCtxRep &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA2_REQCTXT_REP))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].reqCtxRep 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].reqCtxRep,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA2_REQCTXT_REP));
            return -NVL_BAD_ARGS;
        }

        // An address offset is only meaningful relative to an address base.
        if ((p->remapPolicy[i].flags & NVSWITCH_REMAP_POLICY_FLAGS_ADR_OFFSET) &&
            !(p->remapPolicy[i].flags & NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].flags: _FLAGS_ADR_OFFSET should not be set if "
                "_FLAGS_ADR_BASE is not set\n",
                i);
            return -NVL_BAD_ARGS;
        }

        // Validate that only bits 35:20 are used
        if (p->remapPolicy[i].addressBase &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_BASE_PHYS_LR10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].addressBase 0x%llx & ~0x%llx != 0\n",
                i, p->remapPolicy[i].addressBase,
                DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_BASE_PHYS_LR10));
            return -NVL_BAD_ARGS;
        }

        // Validate that only bits 35:20 are used
        if (p->remapPolicy[i].addressLimit &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LR10))
        {
            NVSWITCH_PRINT(device, ERROR,
                 "remapPolicy[%d].addressLimit 0x%llx & ~0x%llx != 0\n",
                 i, p->remapPolicy[i].addressLimit,
                 DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LR10));
            return -NVL_BAD_ARGS;
        }

        // Validate base & limit describe a region
        if (p->remapPolicy[i].addressBase > p->remapPolicy[i].addressLimit)
        {
            NVSWITCH_PRINT(device, ERROR,
                 "remapPolicy[%d].addressBase/Limit invalid: 0x%llx > 0x%llx\n",
                 i, p->remapPolicy[i].addressBase, p->remapPolicy[i].addressLimit);
            return -NVL_BAD_ARGS;
        }

        // Validate that only bits 35:20 are used
        if (p->remapPolicy[i].addressOffset &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].addressOffset 0x%llx & ~0x%llx != 0\n",
                i, p->remapPolicy[i].addressOffset,
                DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10));
            return -NVL_BAD_ARGS;
        }

        // Validate limit - base + offset doesn't overflow 64G
        if ((p->remapPolicy[i].addressLimit - p->remapPolicy[i].addressBase +
                p->remapPolicy[i].addressOffset) &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].addressLimit 0x%llx - addressBase 0x%llx + "
                "addressOffset 0x%llx overflows 64GB\n",
                i, p->remapPolicy[i].addressLimit, p->remapPolicy[i].addressBase,
                p->remapPolicy[i].addressOffset);
            return -NVL_BAD_ARGS;
        }
    }

    // All entries validated; program the hardware.
    _nvswitch_set_remap_policy_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->remapPolicy);

    return retval;
}
1972 
1973 /*
1974  * CTRL_NVSWITCH_GET_REMAP_POLICY
1975  */
1976 
1977 #define NVSWITCH_NUM_REMAP_POLICY_REGS_LR10 5
1978 
/*
 * Reads the NPORT's remap policy RAM starting at params->firstIndex and
 * returns up to NVSWITCH_REMAP_POLICY_ENTRIES_MAX programmed (nonzero)
 * entries, decoded back into NVSWITCH_REMAP_POLICY_ENTRY form.
 * All-zero RAM entries are skipped. On return, params->numEntries is the
 * count of entries collected and params->nextIndex is the RAM index at
 * which a subsequent call should resume.
 */
NvlStatus
nvswitch_ctrl_get_remap_policy_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_REMAP_POLICY_PARAMS *params
)
{
    NVSWITCH_REMAP_POLICY_ENTRY *remap_policy;
    NvU32 remap_policy_data[NVSWITCH_NUM_REMAP_POLICY_REGS_LR10]; // 5 REMAP tables
    NvU32 table_index;
    NvU32 remap_count;
    NvU32 remap_address;
    NvU32 address_offset;
    NvU32 address_base;
    NvU32 address_limit;
    NvU32 ram_size;

    // The target port must have a valid NPORT engine instance.
    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "NPORT port #%d not valid\n",
            params->portNum);
        return -NVL_BAD_ARGS;
    }

    // Only the primary remap table is supported on LR10.
    if (params->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Remap table #%d not supported\n",
            params->tableSelect);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM);
    if ((params->firstIndex >= ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: remapPolicy first index %d out of range[%d..%d].\n",
            __FUNCTION__, params->firstIndex, 0, ram_size - 1);
        return -NVL_BAD_ARGS;
    }

    // Clear the output array so skipped/unused slots read back as zero.
    nvswitch_os_memset(params->entry, 0, (NVSWITCH_REMAP_POLICY_ENTRIES_MAX *
        sizeof(NVSWITCH_REMAP_POLICY_ENTRY)));

    table_index = params->firstIndex;
    remap_policy = params->entry;
    remap_count = 0;

    /* set table offset */
    // Auto-increment advances the RAM pointer as entries are read; the
    // DATA0..DATA4 read order below must be preserved (hardware-defined
    // increment trigger -- do not reorder).
    NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    while (remap_count < NVSWITCH_REMAP_POLICY_ENTRIES_MAX &&
        table_index < ram_size)
    {
        remap_policy_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA0);
        remap_policy_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA1);
        remap_policy_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA2);
        remap_policy_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA3);
        remap_policy_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA4);

        /* add to remap_entries list if nonzero */
        if (remap_policy_data[0] || remap_policy_data[1] || remap_policy_data[2] ||
            remap_policy_data[3] || remap_policy_data[4])
        {
            remap_policy[remap_count].irlSelect =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _IRL_SEL, remap_policy_data[0]);

            remap_policy[remap_count].entryValid =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _ACLVALID, remap_policy_data[0]);

            // Re-shift the RAM field values back into their 64-bit
            // physical-address bit positions (inverse of the set path).
            remap_address =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _RMAP_ADDR, remap_policy_data[0]);

            remap_policy[remap_count].address =
                DRF_NUM64(_INGRESS, _REMAP, _ADDR_PHYS_LR10, remap_address);

            remap_policy[remap_count].reqCtxMask =
                DRF_VAL(_INGRESS, _REMAPTABDATA1, _REQCTXT_MSK, remap_policy_data[1]);

            remap_policy[remap_count].reqCtxChk =
                DRF_VAL(_INGRESS, _REMAPTABDATA1, _REQCTXT_CHK, remap_policy_data[1]);

            remap_policy[remap_count].reqCtxRep =
                DRF_VAL(_INGRESS, _REMAPTABDATA2, _REQCTXT_REP, remap_policy_data[2]);

            address_offset =
                DRF_VAL(_INGRESS, _REMAPTABDATA2, _ADR_OFFSET, remap_policy_data[2]);

            remap_policy[remap_count].addressOffset =
                DRF_NUM64(_INGRESS, _REMAP, _ADR_OFFSET_PHYS_LR10, address_offset);

            address_base =
                DRF_VAL(_INGRESS, _REMAPTABDATA3, _ADR_BASE, remap_policy_data[3]);

            remap_policy[remap_count].addressBase =
                DRF_NUM64(_INGRESS, _REMAP, _ADR_BASE_PHYS_LR10, address_base);

            address_limit =
                DRF_VAL(_INGRESS, _REMAPTABDATA3, _ADR_LIMIT, remap_policy_data[3]);

            remap_policy[remap_count].addressLimit =
                DRF_NUM64(_INGRESS, _REMAP, _ADR_LIMIT_PHYS_LR10, address_limit);

            remap_policy[remap_count].targetId =
                DRF_VAL(_INGRESS, _REMAPTABDATA4, _TGTID, remap_policy_data[4]);

            remap_policy[remap_count].flags =
                DRF_VAL(_INGRESS, _REMAPTABDATA4, _RFUNC, remap_policy_data[4]);

            remap_count++;
        }

        table_index++;
    }

    params->nextIndex = table_index;
    params->numEntries = remap_count;

    return NVL_SUCCESS;
}
2103 
2104 /*
2105  * CTRL_NVSWITCH_SET_REMAP_POLICY_VALID
2106  */
/*
 * Updates only the valid (ACLVALID) bit of remap policy RAM entries
 * [firstIndex, firstIndex + numEntries), leaving every other field intact
 * via a read-modify-write of each entry's five data registers.
 *
 * Returns NVL_SUCCESS, or -NVL_BAD_ARGS / -NVL_ERR_NOT_SUPPORTED on
 * validation failure.
 */
NvlStatus
nvswitch_ctrl_set_remap_policy_valid_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_REMAP_POLICY_VALID *p
)
{
    NvU32 remap_ram;
    NvU32 ram_address = p->firstIndex;
    NvU32 remap_policy_data[NVSWITCH_NUM_REMAP_POLICY_REGS_LR10]; // 5 REMAP tables
    NvU32 i;
    NvU32 ram_size;

    // The target port must have a valid NPORT engine instance.
    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: NPORT port #%d not valid\n",
            __FUNCTION__, p->portNum);
        return -NVL_BAD_ARGS;
    }

    // Only the primary remap table is supported on LR10.
    if (p->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Remap table #%d not supported\n",
            p->tableSelect);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    // The requested range must lie entirely within the remap policy RAM.
    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM);
    if ((p->firstIndex >= ram_size) ||
        (p->numEntries > NVSWITCH_REMAP_POLICY_ENTRIES_MAX) ||
        (p->firstIndex + p->numEntries > ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: remapPolicy[%d..%d] overflows range %d..%d or size %d.\n",
            __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
            0, ram_size - 1,
            NVSWITCH_REMAP_POLICY_ENTRIES_MAX);
        return -NVL_BAD_ARGS;
    }

    // Select REMAPPOLICY RAM and disable Auto Increment: the RAM address is
    // set explicitly per entry below since each entry is read then rewritten.
    remap_ram =
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0);

    for (i = 0; i < p->numEntries; i++)
    {
        /* set the ram address */
        remap_ram = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, remap_ram);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, remap_ram);

        // Read back the full entry so all fields can be preserved.
        remap_policy_data[0] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA0);
        remap_policy_data[1] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA1);
        remap_policy_data[2] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA2);
        remap_policy_data[3] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA3);
        remap_policy_data[4] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA4);

        // Set valid bit in REMAPTABDATA0.
        remap_policy_data[0] = FLD_SET_DRF_NUM(_INGRESS, _REMAPTABDATA0, _ACLVALID, p->entryValid[i], remap_policy_data[0]);

        // Write back in descending order so DATA0 (the commit register for
        // the set path) is written last.
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA4, remap_policy_data[4]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA3, remap_policy_data[3]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA2, remap_policy_data[2]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA1, remap_policy_data[1]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA0, remap_policy_data[0]);
    }

    return NVL_SUCCESS;
}
2178 
2179 //
2180 // Programming invalid entries to 0x3F causes Route block to detect an invalid port number
2181 // and flag a PRIV error to the FM. (See Table 14.RID RAM Programming, IAS 3.3.4)
2182 //
2183 
#define NVSWITCH_INVALID_PORT_VAL_LR10   0x3F
#define NVSWITCH_INVALID_VC_VAL_LR10     0x0

// Fetch the destination port (or VC map) for portList slot _idx of a
// routing entry, substituting the invalid-port/VC sentinel for slots
// beyond the entry's populated count (numEntries).
#define NVSWITCH_PORTLIST_PORT_LR10(_entry, _idx) \
    ((_idx < _entry.numEntries) ? _entry.portList[_idx].destPortNum : NVSWITCH_INVALID_PORT_VAL_LR10)

#define NVSWITCH_PORTLIST_VC_LR10(_entry, _idx) \
    ((_idx < _entry.numEntries) ? _entry.portList[_idx].vcMap : NVSWITCH_INVALID_VC_VAL_LR10)
2192 
2193 /*
2194  * CTRL_NVSWITCH_SET_ROUTING_ID
2195  */
2196 
/*
 * Programs entries [firstIndex, firstIndex + numEntries) of the NPORT's
 * ingress RID (routing ID) RAM. Each entry packs 16 port/VC slots across
 * RIDTABDATA0..5; slots past routing_id[i].numEntries are filled with the
 * invalid-port sentinel (see NVSWITCH_PORTLIST_*_LR10 above).
 * Inputs are assumed pre-validated by the caller.
 */
static void
_nvswitch_set_routing_id_lr10
(
    nvswitch_device *device,
    NvU32 portNum,
    NvU32 firstIndex,
    NvU32 numEntries,
    NVSWITCH_ROUTING_ID_ENTRY *routing_id
)
{
    NvU32 i;
    NvU32 rmod;

    // Select the RID RAM at firstIndex with auto-increment enabled so each
    // completed entry write advances the RAM index.
    NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    for (i = 0; i < numEntries; i++)
    {
        // Stage DATA1..DATA5 first; DATA0 is written last to commit (below).
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA1,
            DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT3,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 3)) |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE3, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 3))   |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT4,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 4)) |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE4, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 4))   |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT5,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 5)) |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE5, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 5)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA2,
            DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT6,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 6)) |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE6, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 6))   |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT7,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 7)) |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE7, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 7))   |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT8,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 8)) |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE8, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 8)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA3,
            DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT9,     NVSWITCH_PORTLIST_PORT_LR10(routing_id[i],  9)) |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE9,  NVSWITCH_PORTLIST_VC_LR10(routing_id[i],  9))   |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT10,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 10)) |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE10, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 10))   |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT11,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 11)) |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE11, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 11)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA4,
            DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT12,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 12)) |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE12, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 12))   |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT13,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 13)) |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE13, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 13))   |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT14,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 14)) |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE14, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 14)));

        // RMOD packs modifier flags: bit 6 = use routing LAN,
        // bit 9 = enable IRL error response (mirrors the decode in
        // nvswitch_ctrl_get_routing_id_lr10).
        rmod =
            (routing_id[i].useRoutingLan ? NVBIT(6) : 0) |
            (routing_id[i].enableIrlErrResponse ? NVBIT(9) : 0);

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA5,
            DRF_NUM(_INGRESS, _RIDTABDATA5, _PORT15,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 15)) |
            DRF_NUM(_INGRESS, _RIDTABDATA5, _VC_MODE15, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 15))   |
            DRF_NUM(_INGRESS, _RIDTABDATA5, _RMOD,      rmod)                                           |
            DRF_NUM(_INGRESS, _RIDTABDATA5, _ACLVALID,  routing_id[i].entryValid));

        NVSWITCH_ASSERT(routing_id[i].numEntries <= 16);
        // Write last and auto-increment. A group size of 16 is encoded as
        // _GSIZE == 0 (see the matching decode in the get path).
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA0,
            DRF_NUM(_INGRESS, _RIDTABDATA0, _GSIZE,
                (routing_id[i].numEntries == 16) ? 0x0 : routing_id[i].numEntries) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT0,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 0)) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE0, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 0))   |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT1,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 1)) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE1, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 1))   |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT2,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 2)) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE2, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 2)));
    }
}
2272 
2273 #define NVSWITCH_NUM_RIDTABDATA_REGS_LR10 6
2274 
/*
 * Reads the NPORT's RID (routing ID) RAM starting at params->firstIndex and
 * returns up to NVSWITCH_ROUTING_ID_ENTRIES_MAX programmed (nonzero)
 * entries, each tagged with its RAM index. All-zero entries are skipped.
 * On return, params->numEntries is the count collected and
 * params->nextIndex is the RAM index at which a subsequent call resumes.
 */
NvlStatus
nvswitch_ctrl_get_routing_id_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_ROUTING_ID_PARAMS *params
)
{
    NVSWITCH_ROUTING_ID_IDX_ENTRY *rid_entries;
    NvU32 table_index;
    NvU32 rid_tab_data[NVSWITCH_NUM_RIDTABDATA_REGS_LR10]; // 6 RID tables
    NvU32 rid_count;
    NvU32 rmod;
    NvU32 gsize;
    NvU32 ram_size;

    // The target port must have a valid NPORT engine instance.
    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: NPORT port #%d not valid\n",
            __FUNCTION__, params->portNum);
        return -NVL_BAD_ARGS;
    }

    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
    if (params->firstIndex >= ram_size)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: routingId first index %d out of range[%d..%d].\n",
            __FUNCTION__, params->firstIndex, 0, ram_size - 1);
        return -NVL_BAD_ARGS;
    }

    // Clear the output array so skipped/unused slots read back as zero.
    nvswitch_os_memset(params->entries, 0, sizeof(params->entries));

    table_index = params->firstIndex;
    rid_entries = params->entries;
    rid_count = 0;

    /* set table offset */
    // Auto-increment advances the RAM pointer as entries are read; the
    // DATA0..DATA5 read order below must be preserved (hardware-defined
    // increment trigger -- do not reorder).
    NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    while (rid_count < NVSWITCH_ROUTING_ID_ENTRIES_MAX &&
           table_index < ram_size)
    {
        rid_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA0);
        rid_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA1);
        rid_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA2);
        rid_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA3);
        rid_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA4);
        rid_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA5);

        /* add to rid_entries list if nonzero */
        if (rid_tab_data[0] || rid_tab_data[1] || rid_tab_data[2] ||
            rid_tab_data[3] || rid_tab_data[4] || rid_tab_data[5])
        {
            // Unpack the 16 port/VC slots spread across RIDTABDATA0..5
            // (inverse of the packing in _nvswitch_set_routing_id_lr10).
            rid_entries[rid_count].entry.portList[0].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT0, rid_tab_data[0]);
            rid_entries[rid_count].entry.portList[0].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE0, rid_tab_data[0]);

            rid_entries[rid_count].entry.portList[1].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT1, rid_tab_data[0]);
            rid_entries[rid_count].entry.portList[1].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE1, rid_tab_data[0]);

            rid_entries[rid_count].entry.portList[2].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT2, rid_tab_data[0]);
            rid_entries[rid_count].entry.portList[2].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE2, rid_tab_data[0]);

            rid_entries[rid_count].entry.portList[3].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT3, rid_tab_data[1]);
            rid_entries[rid_count].entry.portList[3].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE3, rid_tab_data[1]);

            rid_entries[rid_count].entry.portList[4].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT4, rid_tab_data[1]);
            rid_entries[rid_count].entry.portList[4].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE4, rid_tab_data[1]);

            rid_entries[rid_count].entry.portList[5].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT5, rid_tab_data[1]);
            rid_entries[rid_count].entry.portList[5].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE5, rid_tab_data[1]);

            rid_entries[rid_count].entry.portList[6].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT6, rid_tab_data[2]);
            rid_entries[rid_count].entry.portList[6].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE6, rid_tab_data[2]);

            rid_entries[rid_count].entry.portList[7].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT7, rid_tab_data[2]);
            rid_entries[rid_count].entry.portList[7].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE7, rid_tab_data[2]);

            rid_entries[rid_count].entry.portList[8].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT8, rid_tab_data[2]);
            rid_entries[rid_count].entry.portList[8].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE8, rid_tab_data[2]);

            rid_entries[rid_count].entry.portList[9].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT9, rid_tab_data[3]);
            rid_entries[rid_count].entry.portList[9].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE9, rid_tab_data[3]);

            rid_entries[rid_count].entry.portList[10].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT10, rid_tab_data[3]);
            rid_entries[rid_count].entry.portList[10].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE10, rid_tab_data[3]);

            rid_entries[rid_count].entry.portList[11].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT11, rid_tab_data[3]);
            rid_entries[rid_count].entry.portList[11].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE11, rid_tab_data[3]);

            rid_entries[rid_count].entry.portList[12].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT12, rid_tab_data[4]);
            rid_entries[rid_count].entry.portList[12].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE12, rid_tab_data[4]);

            rid_entries[rid_count].entry.portList[13].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT13, rid_tab_data[4]);
            rid_entries[rid_count].entry.portList[13].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE13, rid_tab_data[4]);

            rid_entries[rid_count].entry.portList[14].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT14, rid_tab_data[4]);
            rid_entries[rid_count].entry.portList[14].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE14, rid_tab_data[4]);

            rid_entries[rid_count].entry.portList[15].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA5, _PORT15, rid_tab_data[5]);
            rid_entries[rid_count].entry.portList[15].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA5, _VC_MODE15, rid_tab_data[5]);
            rid_entries[rid_count].entry.entryValid               = DRF_VAL(_INGRESS, _RIDTABDATA5, _ACLVALID, rid_tab_data[5]);

            // RMOD flag decode: bit 6 = use routing LAN, bit 9 = enable IRL
            // error response (mirrors the encode in the set path).
            rmod = DRF_VAL(_INGRESS, _RIDTABDATA5, _RMOD, rid_tab_data[5]);
            rid_entries[rid_count].entry.useRoutingLan = (NVBIT(6) & rmod) ? 1 : 0;
            rid_entries[rid_count].entry.enableIrlErrResponse = (NVBIT(9) & rmod) ? 1 : 0;

            // Gsize of 16 falls into the 0th entry of GLT region. The _GSIZE field must be mapped accordingly
            // to the number of port entries (See IAS, Table 20, Sect 3.4.2.2. Packet Routing).
            gsize = DRF_VAL(_INGRESS, _RIDTABDATA0, _GSIZE, rid_tab_data[0]);
            rid_entries[rid_count].entry.numEntries = ((gsize == 0) ? 16 : gsize);

            // Tag the entry with the RAM index it was read from.
            rid_entries[rid_count].idx = table_index;
            rid_count++;
        }

        table_index++;
    }

    params->nextIndex = table_index;
    params->numEntries = rid_count;

    return NVL_SUCCESS;
}
2403 
2404 NvlStatus
2405 nvswitch_ctrl_set_routing_id_valid_lr10
2406 (
2407     nvswitch_device *device,
2408     NVSWITCH_SET_ROUTING_ID_VALID *p
2409 )
2410 {
2411     NvU32 rid_ctrl;
2412     NvU32 rid_tab_data0;
2413     NvU32 rid_tab_data1;
2414     NvU32 rid_tab_data2;
2415     NvU32 rid_tab_data3;
2416     NvU32 rid_tab_data4;
2417     NvU32 rid_tab_data5;
2418     NvU32 ram_address = p->firstIndex;
2419     NvU32 i;
2420     NvU32 ram_size;
2421 
2422     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
2423     {
2424         NVSWITCH_PRINT(device, ERROR,
2425             "%s: NPORT port #%d not valid\n",
2426             __FUNCTION__, p->portNum);
2427         return -NVL_BAD_ARGS;
2428     }
2429 
2430     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
2431     if ((p->firstIndex >= ram_size) ||
2432         (p->numEntries > NVSWITCH_ROUTING_ID_ENTRIES_MAX) ||
2433         (p->firstIndex + p->numEntries > ram_size))
2434     {
2435         NVSWITCH_PRINT(device, ERROR,
2436             "%s: routingId[%d..%d] overflows range %d..%d or size %d.\n",
2437             __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
2438             0, ram_size - 1,
2439             NVSWITCH_ROUTING_ID_ENTRIES_MAX);
2440         return -NVL_BAD_ARGS;
2441     }
2442 
2443     // Select RID RAM and disable Auto Increment.
2444     rid_ctrl =
2445         DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) |
2446         DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0);
2447 
2448 
2449     for (i = 0; i < p->numEntries; i++)
2450     {
2451         /* set the ram address */
2452         rid_ctrl = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, rid_ctrl);
2453         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, rid_ctrl);
2454 
2455         rid_tab_data0 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA0);
2456         rid_tab_data1 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA1);
2457         rid_tab_data2 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA2);
2458         rid_tab_data3 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA3);
2459         rid_tab_data4 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA4);
2460         rid_tab_data5 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA5);
2461 
2462         // Set the valid bit in _RIDTABDATA5
2463         rid_tab_data5 = FLD_SET_DRF_NUM(_INGRESS, _RIDTABDATA5, _ACLVALID,
2464             p->entryValid[i], rid_tab_data5);
2465 
2466         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA1, rid_tab_data1);
2467         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA2, rid_tab_data2);
2468         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA3, rid_tab_data3);
2469         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA4, rid_tab_data4);
2470         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA5, rid_tab_data5);
2471         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA0, rid_tab_data0);
2472     }
2473 
2474     return NVL_SUCCESS;
2475 }
2476 
2477 NvlStatus
2478 nvswitch_ctrl_set_routing_id_lr10
2479 (
2480     nvswitch_device *device,
2481     NVSWITCH_SET_ROUTING_ID *p
2482 )
2483 {
2484     NvU32 i, j;
2485     NvlStatus retval = NVL_SUCCESS;
2486     NvU32 ram_size;
2487 
2488     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
2489     {
2490         NVSWITCH_PRINT(device, ERROR,
2491             "NPORT port #%d not valid\n",
2492             p->portNum);
2493         return -NVL_BAD_ARGS;
2494     }
2495 
2496     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
2497     if ((p->firstIndex >= ram_size) ||
2498         (p->numEntries > NVSWITCH_ROUTING_ID_ENTRIES_MAX) ||
2499         (p->firstIndex + p->numEntries > ram_size))
2500     {
2501         NVSWITCH_PRINT(device, ERROR,
2502             "routingId[%d..%d] overflows range %d..%d or size %d.\n",
2503             p->firstIndex, p->firstIndex + p->numEntries - 1,
2504             0, ram_size - 1,
2505             NVSWITCH_ROUTING_ID_ENTRIES_MAX);
2506         return -NVL_BAD_ARGS;
2507     }
2508 
2509     for (i = 0; i < p->numEntries; i++)
2510     {
2511         if ((p->routingId[i].numEntries < 1) ||
2512             (p->routingId[i].numEntries > NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX))
2513         {
2514             NVSWITCH_PRINT(device, ERROR,
2515                 "routingId[%d].portList[] size %d overflows range %d..%d\n",
2516                 i, p->routingId[i].numEntries,
2517                 1, NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX);
2518             return -NVL_BAD_ARGS;
2519         }
2520 
2521         for (j = 0; j < p->routingId[i].numEntries; j++)
2522         {
2523             if (p->routingId[i].portList[j].vcMap > DRF_MASK(NV_INGRESS_RIDTABDATA0_VC_MODE0))
2524             {
2525                 NVSWITCH_PRINT(device, ERROR,
2526                     "routingId[%d].portList[%d] vcMap 0x%x out of valid range (0x%x..0x%x)\n",
2527                     i, j,
2528                     p->routingId[i].portList[j].vcMap,
2529                     0, DRF_MASK(NV_INGRESS_RIDTABDATA0_VC_MODE0));
2530                 return -NVL_BAD_ARGS;
2531             }
2532 
2533             if (p->routingId[i].portList[j].destPortNum > DRF_MASK(NV_INGRESS_RIDTABDATA0_PORT0))
2534             {
2535                 NVSWITCH_PRINT(device, ERROR,
2536                     "routingId[%d].portList[%d] destPortNum 0x%x out of valid range (0x%x..0x%x)\n",
2537                     i, j,
2538                     p->routingId[i].portList[j].destPortNum,
2539                     0, DRF_MASK(NV_INGRESS_RIDTABDATA0_PORT0));
2540                 return -NVL_BAD_ARGS;
2541             }
2542         }
2543     }
2544 
2545     _nvswitch_set_routing_id_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->routingId);
2546 
2547     return retval;
2548 }
2549 
2550 /*
2551  * CTRL_NVSWITCH_SET_ROUTING_LAN
2552  */
2553 
2554 //
2555 // Check the data field is present in the list.  Return either the data field
2556 // or default if not present.
2557 //
2558 #define NVSWITCH_PORTLIST_VALID_LR10(_entry, _idx, _field, _default) \
2559     ((_idx < _entry.numEntries) ? _entry.portList[_idx]._field  : _default)
2560 
//
// Program 'numEntries' consecutive RLAN table entries on port 'portNum',
// starting at table index 'firstIndex'.  Entries come from the
// caller-validated routing_lan[] array (range checks are performed in
// nvswitch_ctrl_set_routing_lan_lr10 before this is called).
//
// portList[] slots at or beyond an entry's numEntries are programmed with
// the defaults groupSelect=0 / groupSize=1 via NVSWITCH_PORTLIST_VALID_LR10.
//
static void
_nvswitch_set_routing_lan_lr10
(
    nvswitch_device *device,
    NvU32 portNum,
    NvU32 firstIndex,
    NvU32 numEntries,
    NVSWITCH_ROUTING_LAN_ENTRY *routing_lan
)
{
    NvU32 i;

    // Select the RLAN RAM at firstIndex with auto-increment enabled; the
    // final _RLANTABDATA0 write of each entry advances the RAM address
    // (see "Write last and auto-increment" below).
    NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    for (i = 0; i < numEntries; i++)
    {
        //
        // NOTE: The GRP_SIZE field is 4-bits.  A subgroup is size 1 through 16
        // with encoding 0x0=16 and 0x1=1, ..., 0xF=15.
        // Programming of GRP_SIZE takes advantage of the inherent masking of
        // DRF_NUM to truncate 16 to 0.
        // See bug #3300673
        //

        // Stage _RLANTABDATA1.._RLANTABDATA5 first; _RLANTABDATA0 commits.
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA1,
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_3, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 3, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_3, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 3, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_4, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 4, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_4, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 4, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_5, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 5, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_5, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 5, groupSize, 1)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA2,
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_6, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 6, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_6, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 6, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_7, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 7, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_7, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 7, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_8, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 8, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_8, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 8, groupSize, 1)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA3,
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_9, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 9, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_9, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 9, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_10, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 10, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_10, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 10, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_11, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 11, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_11, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 11, groupSize, 1)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA4,
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_12, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 12, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_12, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 12, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_13, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 13, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_13, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 13, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_14, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 14, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_14, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 14, groupSize, 1)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA5,
            DRF_NUM(_INGRESS, _RLANTABDATA5, _GRP_SEL_15, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 15, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA5, _GRP_SIZE_15, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 15, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA5, _ACLVALID,  routing_lan[i].entryValid));

        // Write last and auto-increment
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA0,
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_0, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 0, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_0, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 0, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_1, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 1, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_1, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 1, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_2, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 2, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_2, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 2, groupSize, 1)));
    }
}
2635 
2636 NvlStatus
2637 nvswitch_ctrl_set_routing_lan_lr10
2638 (
2639     nvswitch_device *device,
2640     NVSWITCH_SET_ROUTING_LAN *p
2641 )
2642 {
2643     NvU32 i, j;
2644     NvlStatus retval = NVL_SUCCESS;
2645     NvU32 ram_size;
2646 
2647     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
2648     {
2649         NVSWITCH_PRINT(device, ERROR,
2650             "%s: NPORT port #%d not valid\n",
2651             __FUNCTION__, p->portNum);
2652         return -NVL_BAD_ARGS;
2653     }
2654 
2655     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
2656     if ((p->firstIndex >= ram_size) ||
2657         (p->numEntries > NVSWITCH_ROUTING_LAN_ENTRIES_MAX) ||
2658         (p->firstIndex + p->numEntries > ram_size))
2659     {
2660         NVSWITCH_PRINT(device, ERROR,
2661             "%s: routingLan[%d..%d] overflows range %d..%d or size %d.\n",
2662             __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
2663             0, ram_size - 1,
2664             NVSWITCH_ROUTING_LAN_ENTRIES_MAX);
2665         return -NVL_BAD_ARGS;
2666     }
2667 
2668     for (i = 0; i < p->numEntries; i++)
2669     {
2670         if (p->routingLan[i].numEntries > NVSWITCH_ROUTING_LAN_GROUP_SEL_MAX)
2671         {
2672             NVSWITCH_PRINT(device, ERROR,
2673                 "%s: routingLan[%d].portList[] size %d overflows range %d..%d\n",
2674                 __FUNCTION__, i, p->routingLan[i].numEntries,
2675                 0, NVSWITCH_ROUTING_LAN_GROUP_SEL_MAX);
2676             return -NVL_BAD_ARGS;
2677         }
2678 
2679         for (j = 0; j < p->routingLan[i].numEntries; j++)
2680         {
2681             if (p->routingLan[i].portList[j].groupSelect > DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SEL_0))
2682             {
2683                 NVSWITCH_PRINT(device, ERROR,
2684                     "%s: routingLan[%d].portList[%d] groupSelect 0x%x out of valid range (0x%x..0x%x)\n",
2685                     __FUNCTION__, i, j,
2686                     p->routingLan[i].portList[j].groupSelect,
2687                     0, DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SEL_0));
2688                 return -NVL_BAD_ARGS;
2689             }
2690 
2691             if ((p->routingLan[i].portList[j].groupSize == 0) ||
2692                 (p->routingLan[i].portList[j].groupSize > DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SIZE_0) + 1))
2693             {
2694                 NVSWITCH_PRINT(device, ERROR,
2695                     "%s: routingLan[%d].portList[%d] groupSize 0x%x out of valid range (0x%x..0x%x)\n",
2696                     __FUNCTION__, i, j,
2697                     p->routingLan[i].portList[j].groupSize,
2698                     1, DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SIZE_0) + 1);
2699                 return -NVL_BAD_ARGS;
2700             }
2701         }
2702     }
2703 
2704     _nvswitch_set_routing_lan_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->routingLan);
2705 
2706     return retval;
2707 }
2708 
// Number of NV_INGRESS_RLANTABDATA* data registers backing one RLAN table entry
#define NVSWITCH_NUM_RLANTABDATA_REGS_LR10 6
2710 
2711 NvlStatus
2712 nvswitch_ctrl_get_routing_lan_lr10
2713 (
2714     nvswitch_device *device,
2715     NVSWITCH_GET_ROUTING_LAN_PARAMS *params
2716 )
2717 {
2718     NVSWITCH_ROUTING_LAN_IDX_ENTRY *rlan_entries;
2719     NvU32 table_index;
2720     NvU32 rlan_tab_data[NVSWITCH_NUM_RLANTABDATA_REGS_LR10]; // 6 RLAN tables
2721     NvU32 rlan_count;
2722     NvU32 ram_size;
2723 
2724     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
2725     {
2726         NVSWITCH_PRINT(device, ERROR,
2727             "%s: NPORT port #%d not valid\n",
2728             __FUNCTION__, params->portNum);
2729         return -NVL_BAD_ARGS;
2730     }
2731 
2732     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
2733     if ((params->firstIndex >= ram_size))
2734     {
2735         NVSWITCH_PRINT(device, ERROR,
2736             "%s: routingLan first index %d out of range[%d..%d].\n",
2737             __FUNCTION__, params->firstIndex, 0, ram_size - 1);
2738         return -NVL_BAD_ARGS;
2739     }
2740 
2741     nvswitch_os_memset(params->entries, 0, (NVSWITCH_ROUTING_LAN_ENTRIES_MAX *
2742         sizeof(NVSWITCH_ROUTING_LAN_IDX_ENTRY)));
2743 
2744     table_index = params->firstIndex;
2745     rlan_entries = params->entries;
2746     rlan_count = 0;
2747 
2748     /* set table offset */
2749     NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
2750         DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) |
2751         DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM)   |
2752         DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));
2753 
2754     while (rlan_count < NVSWITCH_ROUTING_LAN_ENTRIES_MAX &&
2755            table_index < ram_size)
2756     {
2757         /* read one entry */
2758         rlan_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA0);
2759         rlan_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA1);
2760         rlan_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA2);
2761         rlan_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA3);
2762         rlan_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA4);
2763         rlan_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA5);
2764 
2765         /* add to rlan_entries list if nonzero */
2766         if (rlan_tab_data[0] || rlan_tab_data[1] || rlan_tab_data[2] ||
2767             rlan_tab_data[3] || rlan_tab_data[4] || rlan_tab_data[5])
2768         {
2769             rlan_entries[rlan_count].entry.portList[0].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_0, rlan_tab_data[0]);
2770             rlan_entries[rlan_count].entry.portList[0].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_0, rlan_tab_data[0]);
2771             if (rlan_entries[rlan_count].entry.portList[0].groupSize == 0)
2772             {
2773                 rlan_entries[rlan_count].entry.portList[0].groupSize = 16;
2774             }
2775 
2776             rlan_entries[rlan_count].entry.portList[1].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_1, rlan_tab_data[0]);
2777             rlan_entries[rlan_count].entry.portList[1].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_1, rlan_tab_data[0]);
2778             if (rlan_entries[rlan_count].entry.portList[1].groupSize == 0)
2779             {
2780                 rlan_entries[rlan_count].entry.portList[1].groupSize = 16;
2781             }
2782 
2783             rlan_entries[rlan_count].entry.portList[2].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_2, rlan_tab_data[0]);
2784             rlan_entries[rlan_count].entry.portList[2].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_2, rlan_tab_data[0]);
2785             if (rlan_entries[rlan_count].entry.portList[2].groupSize == 0)
2786             {
2787                 rlan_entries[rlan_count].entry.portList[2].groupSize = 16;
2788             }
2789 
2790             rlan_entries[rlan_count].entry.portList[3].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_3, rlan_tab_data[1]);
2791             rlan_entries[rlan_count].entry.portList[3].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_3, rlan_tab_data[1]);
2792             if (rlan_entries[rlan_count].entry.portList[3].groupSize == 0)
2793             {
2794                 rlan_entries[rlan_count].entry.portList[3].groupSize = 16;
2795             }
2796 
2797             rlan_entries[rlan_count].entry.portList[4].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_4, rlan_tab_data[1]);
2798             rlan_entries[rlan_count].entry.portList[4].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_4, rlan_tab_data[1]);
2799             if (rlan_entries[rlan_count].entry.portList[4].groupSize == 0)
2800             {
2801                 rlan_entries[rlan_count].entry.portList[4].groupSize = 16;
2802             }
2803 
2804             rlan_entries[rlan_count].entry.portList[5].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_5, rlan_tab_data[1]);
2805             rlan_entries[rlan_count].entry.portList[5].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_5, rlan_tab_data[1]);
2806             if (rlan_entries[rlan_count].entry.portList[5].groupSize == 0)
2807             {
2808                 rlan_entries[rlan_count].entry.portList[5].groupSize = 16;
2809             }
2810 
2811             rlan_entries[rlan_count].entry.portList[6].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_6, rlan_tab_data[2]);
2812             rlan_entries[rlan_count].entry.portList[6].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_6, rlan_tab_data[2]);
2813             if (rlan_entries[rlan_count].entry.portList[6].groupSize == 0)
2814             {
2815                 rlan_entries[rlan_count].entry.portList[6].groupSize = 16;
2816             }
2817 
2818             rlan_entries[rlan_count].entry.portList[7].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_7, rlan_tab_data[2]);
2819             rlan_entries[rlan_count].entry.portList[7].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_7, rlan_tab_data[2]);
2820             if (rlan_entries[rlan_count].entry.portList[7].groupSize == 0)
2821             {
2822                 rlan_entries[rlan_count].entry.portList[7].groupSize = 16;
2823             }
2824 
2825             rlan_entries[rlan_count].entry.portList[8].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_8, rlan_tab_data[2]);
2826             rlan_entries[rlan_count].entry.portList[8].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_8, rlan_tab_data[2]);
2827             if (rlan_entries[rlan_count].entry.portList[8].groupSize == 0)
2828             {
2829                 rlan_entries[rlan_count].entry.portList[8].groupSize = 16;
2830             }
2831 
2832             rlan_entries[rlan_count].entry.portList[9].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_9, rlan_tab_data[3]);
2833             rlan_entries[rlan_count].entry.portList[9].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_9, rlan_tab_data[3]);
2834             if (rlan_entries[rlan_count].entry.portList[9].groupSize == 0)
2835             {
2836                 rlan_entries[rlan_count].entry.portList[9].groupSize = 16;
2837             }
2838 
2839             rlan_entries[rlan_count].entry.portList[10].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_10, rlan_tab_data[3]);
2840             rlan_entries[rlan_count].entry.portList[10].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_10, rlan_tab_data[3]);
2841             if (rlan_entries[rlan_count].entry.portList[10].groupSize == 0)
2842             {
2843                 rlan_entries[rlan_count].entry.portList[10].groupSize = 16;
2844             }
2845 
2846             rlan_entries[rlan_count].entry.portList[11].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_11, rlan_tab_data[3]);
2847             rlan_entries[rlan_count].entry.portList[11].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_11, rlan_tab_data[3]);
2848             if (rlan_entries[rlan_count].entry.portList[11].groupSize == 0)
2849             {
2850                 rlan_entries[rlan_count].entry.portList[11].groupSize = 16;
2851             }
2852 
2853             rlan_entries[rlan_count].entry.portList[12].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_12, rlan_tab_data[4]);
2854             rlan_entries[rlan_count].entry.portList[12].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_12, rlan_tab_data[4]);
2855             if (rlan_entries[rlan_count].entry.portList[12].groupSize == 0)
2856             {
2857                 rlan_entries[rlan_count].entry.portList[12].groupSize = 16;
2858             }
2859 
2860             rlan_entries[rlan_count].entry.portList[13].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_13, rlan_tab_data[4]);
2861             rlan_entries[rlan_count].entry.portList[13].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_13, rlan_tab_data[4]);
2862             if (rlan_entries[rlan_count].entry.portList[13].groupSize == 0)
2863             {
2864                 rlan_entries[rlan_count].entry.portList[13].groupSize = 16;
2865             }
2866 
2867             rlan_entries[rlan_count].entry.portList[14].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_14, rlan_tab_data[4]);
2868             rlan_entries[rlan_count].entry.portList[14].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_14, rlan_tab_data[4]);
2869             if (rlan_entries[rlan_count].entry.portList[14].groupSize == 0)
2870             {
2871                 rlan_entries[rlan_count].entry.portList[14].groupSize = 16;
2872             }
2873 
2874             rlan_entries[rlan_count].entry.portList[15].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA5, _GRP_SEL_15, rlan_tab_data[5]);
2875             rlan_entries[rlan_count].entry.portList[15].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA5, _GRP_SIZE_15, rlan_tab_data[5]);
2876             if (rlan_entries[rlan_count].entry.portList[15].groupSize == 0)
2877             {
2878                 rlan_entries[rlan_count].entry.portList[15].groupSize = 16;
2879             }
2880 
2881             rlan_entries[rlan_count].entry.entryValid               = DRF_VAL(_INGRESS, _RLANTABDATA5, _ACLVALID, rlan_tab_data[5]);
2882             rlan_entries[rlan_count].entry.numEntries = NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX;
2883             rlan_entries[rlan_count].idx  = table_index;
2884 
2885             rlan_count++;
2886         }
2887 
2888         table_index++;
2889     }
2890 
2891     params->nextIndex  = table_index;
2892     params->numEntries = rlan_count;
2893 
2894     return NVL_SUCCESS;
2895 }
2896 
//
// Update only the entry-valid (ACLVALID) bit for p->numEntries RLAN table
// entries starting at p->firstIndex on port p->portNum.  Each entry is
// read-modify-written so every other field is preserved.
//
NvlStatus
nvswitch_ctrl_set_routing_lan_valid_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_ROUTING_LAN_VALID *p
)
{
    NvU32 rlan_ctrl;
    NvU32 rlan_tab_data[NVSWITCH_NUM_RLANTABDATA_REGS_LR10]; // 6 RLAN tables
    NvU32 ram_address = p->firstIndex;
    NvU32 i;
    NvU32 ram_size;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: NPORT port #%d not valid\n",
            __FUNCTION__, p->portNum);
        return -NVL_BAD_ARGS;
    }

    // Reject windows that fall outside the RLAN RAM or exceed the API maximum
    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
    if ((p->firstIndex >= ram_size) ||
        (p->numEntries > NVSWITCH_ROUTING_LAN_ENTRIES_MAX) ||
        (p->firstIndex + p->numEntries > ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: routingLan[%d..%d] overflows range %d..%d or size %d.\n",
            __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
            0, ram_size - 1,
            NVSWITCH_ROUTING_LAN_ENTRIES_MAX);
        return -NVL_BAD_ARGS;
    }

    // Select RLAN RAM and disable Auto Increment.
    rlan_ctrl =
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0);

    for (i = 0; i < p->numEntries; i++)
    {
        /* set the RAM address */
        rlan_ctrl = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, rlan_ctrl);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, rlan_ctrl);

        // Fetch the whole entry so only the valid bit is modified
        rlan_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA0);
        rlan_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA1);
        rlan_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA2);
        rlan_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA3);
        rlan_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA4);
        rlan_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA5);

        // Set the valid bit in _RLANTABDATA5
        rlan_tab_data[5] = FLD_SET_DRF_NUM(_INGRESS, _RLANTABDATA5, _ACLVALID,
            p->entryValid[i], rlan_tab_data[5]);

        // _RLANTABDATA0 is written last, matching the DATA0-last ordering
        // used by the other ingress table writers in this file
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA1, rlan_tab_data[1]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA2, rlan_tab_data[2]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA3, rlan_tab_data[3]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA4, rlan_tab_data[4]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA5, rlan_tab_data[5]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA0, rlan_tab_data[0]);
    }

    return NVL_SUCCESS;
}
2963 
2964 /*
2965  * @Brief : Send priv ring command and wait for completion
2966  *
 * @Description : Issues the encoded command to the priv ring master and
 * polls NV_PPRIV_MASTER_RING_COMMAND until hardware returns it to NO_CMD,
 * or until a 5 msec timeout expires.
2968  *
2969  * @param[in] device        a reference to the device to initialize
2970  * @param[in] cmd           encoded priv ring command
2971  */
2972 NvlStatus
2973 nvswitch_ring_master_cmd_lr10
2974 (
2975     nvswitch_device *device,
2976     NvU32 cmd
2977 )
2978 {
2979     NvU32 value;
2980     NVSWITCH_TIMEOUT timeout;
2981     NvBool           keepPolling;
2982 
2983     NVSWITCH_REG_WR32(device, _PPRIV_MASTER, _RING_COMMAND, cmd);
2984 
2985     nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout);
2986     do
2987     {
2988         keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
2989 
2990         value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_COMMAND);
2991         if (FLD_TEST_DRF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _NO_CMD, value))
2992         {
2993             break;
2994         }
2995 
2996         nvswitch_os_sleep(1);
2997     }
2998     while (keepPolling);
2999 
3000     if (!FLD_TEST_DRF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _NO_CMD, value))
3001     {
3002         NVSWITCH_PRINT(device, ERROR,
3003             "%s: Timeout waiting for RING_COMMAND == NO_CMD (cmd=0x%x).\n",
3004             __FUNCTION__, cmd);
3005         return -NVL_INITIALIZATION_TOTAL_FAILURE;
3006     }
3007 
3008     return NVL_SUCCESS;
3009 }
3010 
3011 /*
3012  * @brief Process the information read from ROM tables and apply it to device
3013  * settings.
3014  *
3015  * @param[in] device    a reference to the device to query
3016  * @param[in] firmware  Information parsed from ROM tables
3017  */
3018 static void
3019 _nvswitch_process_firmware_info_lr10
3020 (
3021     nvswitch_device *device,
3022     NVSWITCH_FIRMWARE *firmware
3023 )
3024 {
3025     NvU32 idx_link;
3026     NvU64 link_enable_mask;
3027 
3028     if (device->firmware.firmware_size == 0)
3029     {
3030         return;
3031     }
3032 
3033     if (device->firmware.nvlink.link_config_found)
3034     {
3035         link_enable_mask = ((NvU64)device->regkeys.link_enable_mask2 << 32 |
3036                             (NvU64)device->regkeys.link_enable_mask);
3037         //
3038         // If the link enables were not already overridden by regkey, then
3039         // apply the ROM link enables
3040         //
3041         if (link_enable_mask == NV_U64_MAX)
3042         {
3043             for (idx_link = 0; idx_link < nvswitch_get_num_links(device); idx_link++)
3044             {
3045                 if ((device->firmware.nvlink.link_enable_mask & NVBIT64(idx_link)) == 0)
3046                 {
3047                     device->link[idx_link].valid = NV_FALSE;
3048                 }
3049             }
3050         }
3051     }
3052 }
3053 
3054 void
3055 nvswitch_init_npg_multicast_lr10
3056 (
3057     nvswitch_device *device
3058 )
3059 {
3060     NvU32 idx_npg;
3061     NvU32 idx_nport;
3062     NvU32 nport_mask;
3063 
3064     //
3065     // Walk the NPGs and build the mask of extant NPORTs
3066     //
3067     for (idx_npg = 0; idx_npg < NVSWITCH_ENG_COUNT(device, NPG, ); idx_npg++)
3068     {
3069         if (NVSWITCH_ENG_IS_VALID(device, NPG, idx_npg))
3070         {
3071             nport_mask = 0;
3072             for (idx_nport = 0; idx_nport < NVSWITCH_NPORT_PER_NPG; idx_nport++)
3073             {
3074                 nport_mask |=
3075                     (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_npg*NVSWITCH_NPORT_PER_NPG + idx_nport) ?
3076                     NVBIT(idx_nport) : 0x0);
3077             }
3078 
3079             NVSWITCH_NPG_WR32_LR10(device, idx_npg,
3080                 _NPG, _CTRL_PRI_MULTICAST,
3081                 DRF_NUM(_NPG, _CTRL_PRI_MULTICAST, _NPORT_ENABLE, nport_mask) |
3082                 DRF_DEF(_NPG, _CTRL_PRI_MULTICAST, _READ_MODE, _AND_ALL_BUSSES));
3083 
3084             NVSWITCH_NPGPERF_WR32_LR10(device, idx_npg,
3085                 _NPGPERF, _CTRL_PRI_MULTICAST,
3086                 DRF_NUM(_NPGPERF, _CTRL_PRI_MULTICAST, _NPORT_ENABLE, nport_mask) |
3087                 DRF_DEF(_NPGPERF, _CTRL_PRI_MULTICAST, _READ_MODE, _AND_ALL_BUSSES));
3088         }
3089     }
3090 }
3091 
/*
 * @Brief : Zero-initialize the NPORT RAMs (tag pools, link/remap/RID/RLAN
 *          tables) on all present NPORTs using the hardware init mechanism,
 *          then program the TSTATE crumbstore RAM address.
 *
 * @param[in] device        a reference to the device to initialize
 *
 * @returns                 NVL_SUCCESS if all NPORTs completed zero-init
 *                          -NVL_ERR_INVALID_STATE on timeout; note that
 *                          initialization deliberately continues in that
 *                          case (see bug 2974064 below)
 */
static NvlStatus
nvswitch_clear_nport_rams_lr10
(
    nvswitch_device *device
)
{
    NvU32 idx_nport;
    NvU64 nport_mask = 0;       // NPORTs whose zero-init is still pending
    NvU32 zero_init_mask;       // expected readback once zero-init completes
    NvU32 val;
    NVSWITCH_TIMEOUT timeout;
    NvBool           keepPolling;
    NvlStatus retval = NVL_SUCCESS;

    // Build the mask of available NPORTs
    for (idx_nport = 0; idx_nport < NVSWITCH_ENG_COUNT(device, NPORT, ); idx_nport++)
    {
        if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport))
        {
            nport_mask |= NVBIT64(idx_nport);
        }
    }

    // Start the HW zero init
    zero_init_mask =
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_1, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_2, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_3, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_4, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_5, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_6, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _LINKTABLEINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _REMAPTABINIT,  _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _RIDTABINIT,    _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _RLANTABINIT,   _HWINIT);

    // Kick off zero-init on every NPORT at once via a broadcast write.
    NVSWITCH_BCAST_WR32_LR10(device, NPORT, _NPORT, _INITIALIZATION,
        zero_init_mask);

    // Allow up to 25ms for all NPORTs to report completion.
    nvswitch_timeout_create(25*NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);

    do
    {
        keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;

        // Check each enabled NPORT that is still pending until all are done
        for (idx_nport = 0; idx_nport < NVSWITCH_ENG_COUNT(device, NPORT, ); idx_nport++)
        {
            if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport) && (nport_mask & NVBIT64(idx_nport)))
            {
                val = NVSWITCH_ENG_RD32_LR10(device, NPORT, idx_nport, _NPORT, _INITIALIZATION);
                // Completion is signaled by the readback matching the
                // full requested init mask.
                if (val == zero_init_mask)
                {
                    nport_mask &= ~NVBIT64(idx_nport);
                }
            }
        }

        if (nport_mask == 0)
        {
            break;
        }

        nvswitch_os_sleep(1);
    }
    while (keepPolling);

    if (nport_mask != 0)
    {
        NVSWITCH_PRINT(device, WARN,
            "%s: Timeout waiting for NV_NPORT_INITIALIZATION (0x%llx)\n",
            __FUNCTION__, nport_mask);
        // Bug 2974064: Review this timeout handling (fall through)
        retval = -NVL_ERR_INVALID_STATE;
    }

    //bug 2737147 requires SW To init this crumbstore setting for LR10
    // Point the TSTATE RAM access window at the crumbstore RAM for
    // VC5 (TRANSDONE), address 0, with auto-increment disabled.
    val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0)             |
          DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CRUMBSTORE_RAM) |
          DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 0)        |
          DRF_DEF(_TSTATE, _RAM_ADDRESS, _VC, _VC5_TRANSDONE);

    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _RAM_ADDRESS, val);

    return retval;
}
3179 
3180 static void
3181 _nvswitch_init_nport_ecc_control_lr10
3182 (
3183     nvswitch_device *device
3184 )
3185 {
3186     // Set ingress ECC error limits
3187     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER,
3188         DRF_NUM(_INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3189     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT, 1);
3190 
3191     // Set egress ECC error limits
3192     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER,
3193         DRF_NUM(_EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3194     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT, 1);
3195 
3196     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER,
3197         DRF_NUM(_EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3198     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT, 1);
3199 
3200     // Set route ECC error limits
3201     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER,
3202         DRF_NUM(_ROUTE, _ERR_NVS_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3203     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER_LIMIT, 1);
3204 
3205     // Set tstate ECC error limits
3206     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER,
3207         DRF_NUM(_TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3208     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1);
3209 
3210     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER,
3211         DRF_NUM(_TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3212     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT, 1);
3213 
3214     // Set sourcetrack ECC error limits to _PROD value
3215     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
3216         DRF_NUM(_SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3217     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1);
3218 
3219     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
3220         DRF_NUM(_SOURCETRACK, _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3221     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1);
3222 
3223     // Enable ECC/parity
3224     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_ECC_CTRL,
3225         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _NCISOC_HDR_ECC_ENABLE, __PROD) |
3226         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, __PROD) |
3227         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _REMAPTAB_ECC_ENABLE, __PROD) |
3228         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _RIDTAB_ECC_ENABLE, __PROD) |
3229         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _RLANTAB_ECC_ENABLE, __PROD));
3230 
3231     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_ECC_CTRL,
3232         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NXBAR_ECC_ENABLE, __PROD) |
3233         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NXBAR_PARITY_ENABLE, __PROD) |
3234         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _RAM_OUT_ECC_ENABLE, __PROD) |
3235         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NCISOC_ECC_ENABLE, __PROD) |
3236         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, __PROD));
3237 
3238     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_ECC_CTRL,
3239         DRF_DEF(_ROUTE, _ERR_ECC_CTRL, _GLT_ECC_ENABLE, __PROD) |
3240         DRF_DEF(_ROUTE, _ERR_ECC_CTRL, _NVS_ECC_ENABLE, __PROD));
3241 
3242     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_ECC_CTRL,
3243         DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _CRUMBSTORE_ECC_ENABLE, __PROD) |
3244         DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _TAGPOOL_ECC_ENABLE, __PROD) |
3245         DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _TD_TID_ECC_ENABLE, _DISABLE));
3246 
3247     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_ECC_CTRL,
3248         DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN0_CRUMBSTORE_ECC_ENABLE, __PROD) |
3249         DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN0_TD_CRUMBSTORE_ECC_ENABLE, _DISABLE) |
3250         DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN1_CRUMBSTORE_ECC_ENABLE, __PROD));
3251 }
3252 
3253 static void
3254 _nvswitch_init_cmd_routing
3255 (
3256     nvswitch_device *device
3257 )
3258 {
3259     NvU32 val;
3260 
3261     //Set Hash policy for the requests.
3262     val = DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN1, _SPRAY) |
3263           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN2, _SPRAY) |
3264           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN4, _SPRAY) |
3265           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN7, _SPRAY);
3266     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _CMD_ROUTE_TABLE0, val);
3267 
3268     // Set Random policy for reponses.
3269     val = DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE2, _RFUN16, _RANDOM) |
3270           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE2, _RFUN17, _RANDOM);
3271     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _CMD_ROUTE_TABLE2, val);
3272 }
3273 
3274 static NvlStatus
3275 _nvswitch_init_portstat_counters
3276 (
3277     nvswitch_device *device
3278 )
3279 {
3280     NvlStatus retval;
3281     NvU32 idx_channel;
3282     NVSWITCH_SET_LATENCY_BINS default_latency_bins;
3283     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
3284 
3285     chip_device->latency_stats = nvswitch_os_malloc(sizeof(NVSWITCH_LATENCY_STATS_LR10));
3286     if (chip_device->latency_stats == NULL)
3287     {
3288         NVSWITCH_PRINT(device, ERROR, "%s: Failed allocate memory for latency stats\n",
3289             __FUNCTION__);
3290         return -NVL_NO_MEM;
3291     }
3292 
3293     nvswitch_os_memset(chip_device->latency_stats, 0, sizeof(NVSWITCH_LATENCY_STATS_LR10));
3294 
3295     //
3296     // These bin thresholds are values provided by Arch based off
3297     // switch latency expectations.
3298     //
3299     for (idx_channel=0; idx_channel < NVSWITCH_NUM_VCS_LR10; idx_channel++)
3300     {
3301         default_latency_bins.bin[idx_channel].lowThreshold = 120;    // 120ns
3302         default_latency_bins.bin[idx_channel].medThreshold = 200;    // 200ns
3303         default_latency_bins.bin[idx_channel].hiThreshold  = 1000;   // 1us
3304     }
3305 
3306     chip_device->latency_stats->sample_interval_msec = 3000; // 3 second sample interval
3307 
3308     retval = nvswitch_ctrl_set_latency_bins(device, &default_latency_bins);
3309     if (retval != NVL_SUCCESS)
3310     {
3311         NVSWITCH_PRINT(device, ERROR, "%s: Failed to set latency bins\n",
3312             __FUNCTION__);
3313         NVSWITCH_ASSERT(0);
3314         return retval;
3315     }
3316 
3317     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_CONTROL,
3318         DRF_DEF(_NPORT, _PORTSTAT_CONTROL, _SWEEPMODE, _SWONDEMAND) |
3319         DRF_DEF(_NPORT, _PORTSTAT_CONTROL, _RANGESELECT, _BITS13TO0));
3320 
3321      NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SOURCE_FILTER_0,
3322          DRF_NUM(_NPORT, _PORTSTAT_SOURCE_FILTER_0, _SRCFILTERBIT, 0xFFFFFFFF));
3323 
3324     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SOURCE_FILTER_1,
3325         DRF_NUM(_NPORT, _PORTSTAT_SOURCE_FILTER_1, _SRCFILTERBIT, 0xF));
3326 
3327     // Set window limit to the maximum value
3328     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_WINDOW_LIMIT, 0xffffffff);
3329 
3330      NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _GLBLLATENCYTIMERCTRL,
3331          DRF_DEF(_NVLSAW, _GLBLLATENCYTIMERCTRL, _ENABLE, _ENABLE));
3332 
3333      NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL,
3334          DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
3335          DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE));
3336 
3337      return NVL_SUCCESS;
3338 }
3339 
3340 NvlStatus
3341 nvswitch_init_nxbar_lr10
3342 (
3343     nvswitch_device *device
3344 )
3345 {
3346     NvU32 tileout;
3347 
3348     // Setting this bit will send error detection info to NPG.
3349     NVSWITCH_BCAST_WR32_LR10(device, TILE, _NXBAR, _TILE_ERR_CYA,
3350         DRF_DEF(_NXBAR, _TILE_ERR_CYA, _SRCID_UPDATE_AT_EGRESS_CTRL, __PROD));
3351 
3352     for (tileout = 0; tileout < NUM_NXBAR_TILEOUTS_PER_TC_LR10; tileout++)
3353     {
3354         NVSWITCH_BCAST_WR32_LR10(device, NXBAR, _NXBAR, _TC_TILEOUT_ERR_CYA(tileout),
3355             DRF_DEF(_NXBAR, _TC_TILEOUT0_ERR_CYA, _SRCID_UPDATE_AT_EGRESS_CTRL, __PROD));
3356     }
3357 
3358     // Enable idle-based clk gating and setup delay count.
3359     NVSWITCH_BCAST_WR32_LR10(device, TILE, _NXBAR, _TILE_PRI_NXBAR_TILE_CG,
3360         DRF_DEF(_NXBAR, _TILE_PRI_NXBAR_TILE_CG, _IDLE_CG_EN, __PROD) |
3361         DRF_DEF(_NXBAR, _TILE_PRI_NXBAR_TILE_CG, _IDLE_CG_DLY_CNT, __PROD));
3362 
3363     NVSWITCH_BCAST_WR32_LR10(device, NXBAR, _NXBAR, _TC_PRI_NXBAR_TC_CG,
3364         DRF_DEF(_NXBAR, _TC_PRI_NXBAR_TC_CG, _IDLE_CG_EN, __PROD) |
3365         DRF_DEF(_NXBAR, _TC_PRI_NXBAR_TC_CG, _IDLE_CG_DLY_CNT, __PROD));
3366 
3367     return NVL_SUCCESS;
3368 }
3369 
3370 NvlStatus
3371 nvswitch_init_nport_lr10
3372 (
3373     nvswitch_device *device
3374 )
3375 {
3376     NvU32 data32, timeout;
3377     NvU32 idx_nport;
3378     NvU32 num_nports;
3379 
3380     num_nports = NVSWITCH_ENG_COUNT(device, NPORT, );
3381 
3382     for (idx_nport = 0; idx_nport < num_nports; idx_nport++)
3383     {
3384         // Find the first valid nport
3385         if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport))
3386         {
3387             break;
3388         }
3389     }
3390 
3391     // There were no valid nports
3392     if (idx_nport == num_nports)
3393     {
3394         NVSWITCH_PRINT(device, ERROR, "%s: No valid nports found!\n", __FUNCTION__);
3395         return -NVL_ERR_INVALID_STATE;
3396     }
3397 
3398     _nvswitch_init_nport_ecc_control_lr10(device);
3399 
3400     data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _ROUTE, _ROUTE_CONTROL);
3401     data32 = FLD_SET_DRF(_ROUTE, _ROUTE_CONTROL, _URRESPENB, __PROD, data32);
3402     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _ROUTE_CONTROL, data32);
3403 
3404     data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _EGRESS, _CTRL);
3405     data32 = FLD_SET_DRF(_EGRESS, _CTRL, _DESTINATIONIDCHECKENB, __PROD, data32);
3406     data32 = FLD_SET_DRF(_EGRESS, _CTRL, _CTO_ENB, __PROD, data32);
3407     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _CTRL, data32);
3408 
3409     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _CTO_TIMER_LIMIT,
3410         DRF_DEF(_EGRESS, _CTO_TIMER_LIMIT, _LIMIT, __PROD));
3411 
3412     if (DRF_VAL(_SWITCH_REGKEY, _ATO_CONTROL, _DISABLE, device->regkeys.ato_control) ==
3413         NV_SWITCH_REGKEY_ATO_CONTROL_DISABLE_TRUE)
3414     {
3415         // ATO Disable
3416         data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _TAGSTATECONTROL);
3417         data32 = FLD_SET_DRF(_TSTATE, _TAGSTATECONTROL, _ATO_ENB, _OFF, data32);
3418         NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _TAGSTATECONTROL, data32);
3419     }
3420     else
3421     {
3422         // ATO Enable
3423         data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _TAGSTATECONTROL);
3424         data32 = FLD_SET_DRF(_TSTATE, _TAGSTATECONTROL, _ATO_ENB, _ON, data32);
3425         NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _TAGSTATECONTROL, data32);
3426 
3427         // ATO Timeout value
3428         timeout = DRF_VAL(_SWITCH_REGKEY, _ATO_CONTROL, _TIMEOUT, device->regkeys.ato_control);
3429         if (timeout != NV_SWITCH_REGKEY_ATO_CONTROL_TIMEOUT_DEFAULT)
3430         {
3431             NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ATO_TIMER_LIMIT,
3432                 DRF_NUM(_TSTATE, _ATO_TIMER_LIMIT, _LIMIT, timeout));
3433         }
3434         else
3435         {
3436             NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ATO_TIMER_LIMIT,
3437                 DRF_DEF(_TSTATE, _ATO_TIMER_LIMIT, _LIMIT, __PROD));
3438         }
3439     }
3440 
3441     if (DRF_VAL(_SWITCH_REGKEY, _STO_CONTROL, _DISABLE, device->regkeys.sto_control) ==
3442         NV_SWITCH_REGKEY_STO_CONTROL_DISABLE_TRUE)
3443     {
3444         // STO Disable
3445         data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _SOURCETRACK, _CTRL);
3446         data32 = FLD_SET_DRF(_SOURCETRACK, _CTRL, _STO_ENB, _OFF, data32);
3447         NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _CTRL, data32);
3448     }
3449     else
3450     {
3451         // STO Enable
3452         data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _SOURCETRACK, _CTRL);
3453         data32 = FLD_SET_DRF(_SOURCETRACK, _CTRL, _STO_ENB, _ON, data32);
3454         NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _CTRL, data32);
3455 
3456         // STO Timeout value
3457         timeout = DRF_VAL(_SWITCH_REGKEY, _STO_CONTROL, _TIMEOUT, device->regkeys.sto_control);
3458         if (timeout != NV_SWITCH_REGKEY_STO_CONTROL_TIMEOUT_DEFAULT)
3459         {
3460             NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _MULTISEC_TIMER0,
3461                 DRF_NUM(_SOURCETRACK, _MULTISEC_TIMER0, _TIMERVAL0, timeout));
3462         }
3463         else
3464         {
3465             NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _MULTISEC_TIMER0,
3466                 DRF_DEF(_SOURCETRACK, _MULTISEC_TIMER0, _TIMERVAL0, __PROD));
3467         }
3468     }
3469 
3470     //
3471     // WAR for bug 200606509
3472     // Disable CAM for entry 0 to prevent false ATO trigger
3473     //
3474     data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _CREQ_CAM_LOCK);
3475     data32 = DRF_NUM(_TSTATE, _CREQ_CAM_LOCK, _ON, 0x1);
3476     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _CREQ_CAM_LOCK, data32);
3477 
3478     //
3479     // WAR for bug 3115824
3480     // Clear CONTAIN_AND_DRAIN during init for links in reset.
3481     // Since SBR does not clear CONTAIN_AND_DRAIN, this will clear the bit
3482     // when the driver is reloaded after an SBR. If the driver has been reloaded
3483     // without an SBR, then CONTAIN_AND_DRAIN will be re-triggered.
3484     //
3485     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _CONTAIN_AND_DRAIN,
3486         DRF_DEF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE));
3487 
3488     return NVL_SUCCESS;
3489 }
3490 
3491 void *
3492 nvswitch_alloc_chipdevice_lr10
3493 (
3494     nvswitch_device *device
3495 )
3496 {
3497     void *chip_device;
3498 
3499     chip_device = nvswitch_os_malloc(sizeof(lr10_device));
3500     if (NULL != chip_device)
3501     {
3502         nvswitch_os_memset(chip_device, 0, sizeof(lr10_device));
3503     }
3504 
3505     device->chip_id = NV_PSMC_BOOT_42_CHIP_ID_LR10;
3506     return(chip_device);
3507 }
3508 
3509 static NvlStatus
3510 nvswitch_initialize_pmgr_lr10
3511 (
3512     nvswitch_device *device
3513 )
3514 {
3515     nvswitch_init_pmgr_lr10(device);
3516     nvswitch_init_pmgr_devices_lr10(device);
3517 
3518     return NVL_SUCCESS;
3519 }
3520 
3521 static NvlStatus
3522 nvswitch_initialize_route_lr10
3523 (
3524     nvswitch_device *device
3525 )
3526 {
3527     NvlStatus retval;
3528 
3529     retval = _nvswitch_init_ganged_link_routing(device);
3530     if (NVL_SUCCESS != retval)
3531     {
3532         NVSWITCH_PRINT(device, ERROR,
3533             "%s: Failed to initialize GLT\n",
3534             __FUNCTION__);
3535         goto nvswitch_initialize_route_exit;
3536     }
3537 
3538     _nvswitch_init_cmd_routing(device);
3539 
3540     // Initialize Portstat Counters
3541     retval = _nvswitch_init_portstat_counters(device);
3542     if (NVL_SUCCESS != retval)
3543     {
3544         NVSWITCH_PRINT(device, ERROR,
3545             "%s: Failed to initialize portstat counters\n",
3546             __FUNCTION__);
3547         goto nvswitch_initialize_route_exit;
3548     }
3549 
3550 nvswitch_initialize_route_exit:
3551     return retval;
3552 }
3553 
3554 
/*
 * @Brief : Enumerate and start the PRIV ring
 *
 * @Description : Issues ENUMERATE_AND_START_RING, then verifies ring
 *                connectivity and that no ring interrupts are pending.
 *                Any pending interrupt is acked and the whole sequence
 *                is retried.
 *
 * @param[in] device        a reference to the device
 *
 * @returns                 NVL_SUCCESS if the ring enumerated cleanly
 *                          -NVL_INITIALIZATION_TOTAL_FAILURE otherwise
 */
NvlStatus
nvswitch_pri_ring_init_lr10
(
    nvswitch_device *device
)
{
    NvU32 i;
    NvU32 value;
    NvBool enumerated = NV_FALSE;
    NvlStatus retval = NVL_SUCCESS;

    //
    // Sometimes on RTL simulation we see the priv ring initialization fail.
    // Retry up to 3 times until this issue is root caused. Bug 1826216.
    //
    for (i = 0; !enumerated && (i < 3); i++)
    {
        value = DRF_DEF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _ENUMERATE_AND_START_RING);
        retval = nvswitch_ring_master_cmd_lr10(device, value);
        if (retval != NVL_SUCCESS)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: PRIV ring enumeration failed\n",
                __FUNCTION__);
            continue;
        }

        // Enumeration command completed; now confirm the ring reports
        // passing connectivity.
        value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_START_RESULTS);
        if (!FLD_TEST_DRF(_PPRIV_MASTER, _RING_START_RESULTS, _CONNECTIVITY, _PASS, value))
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: PRIV ring connectivity failed\n",
                __FUNCTION__);
            continue;
        }

        // Any nonzero interrupt status fails this attempt; log the fault
        // bits of interest, ack the interrupt, and retry.
        value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_INTERRUPT_STATUS0);
        if (value)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0 = %x\n",
                __FUNCTION__, value);

            if ((!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
                    _RING_START_CONN_FAULT, 0, value)) ||
                (!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
                    _DISCONNECT_FAULT, 0, value))      ||
                (!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
                    _OVERFLOW_FAULT, 0, value)))
            {
                NVSWITCH_PRINT(device, ERROR,
                    "%s: PRIV ring error interrupt\n",
                    __FUNCTION__);
            }

            // Best-effort ack; the retry proceeds regardless of its status.
            (void)nvswitch_ring_master_cmd_lr10(device,
                    DRF_DEF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _ACK_INTERRUPT));

            continue;
        }

        enumerated = NV_TRUE;
    }

    if (!enumerated)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Cannot enumerate PRIV ring!\n",
            __FUNCTION__);
        retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
    }

    return retval;
}
3629 
3630 /*
3631  * @Brief : Initializes an NvSwitch hardware state
3632  *
3633  * @Description :
3634  *
3635  * @param[in] device        a reference to the device to initialize
3636  *
3637  * @returns                 NVL_SUCCESS if the action succeeded
3638  *                          -NVL_BAD_ARGS if bad arguments provided
3639  *                          -NVL_PCI_ERROR if bar info unable to be retrieved
3640  */
3641 NvlStatus
3642 nvswitch_initialize_device_state_lr10
3643 (
3644     nvswitch_device *device
3645 )
3646 {
3647     NvlStatus retval = NVL_SUCCESS;
3648 
3649     // alloc chip-specific device structure
3650     device->chip_device = nvswitch_alloc_chipdevice(device);
3651     if (NULL == device->chip_device)
3652     {
3653         NVSWITCH_PRINT(device, ERROR,
3654             "nvswitch_os_malloc during chip_device creation failed!\n");
3655         retval = -NVL_NO_MEM;
3656         goto nvswitch_initialize_device_state_exit;
3657     }
3658 
3659     NVSWITCH_PRINT(device, SETUP,
3660         "%s: MMIO discovery\n",
3661         __FUNCTION__);
3662     retval = nvswitch_device_discovery(device, NV_SWPTOP_TABLE_BASE_ADDRESS_OFFSET);
3663     if (NVL_SUCCESS != retval)
3664     {
3665         NVSWITCH_PRINT(device, ERROR,
3666             "%s: Engine discovery failed\n",
3667             __FUNCTION__);
3668         goto nvswitch_initialize_device_state_exit;
3669     }
3670 
3671     nvswitch_filter_discovery(device);
3672 
3673     retval = nvswitch_process_discovery(device);
3674     if (NVL_SUCCESS != retval)
3675     {
3676         NVSWITCH_PRINT(device, ERROR,
3677             "%s: Discovery processing failed\n",
3678             __FUNCTION__);
3679         goto nvswitch_initialize_device_state_exit;
3680     }
3681 
3682     // now that we have completed discovery, perform initialization steps that
3683     // depend on engineDescriptors being initialized
3684     //
3685     // Temporary location, really needs to be done somewhere common to all flcnables
3686     if (nvswitch_is_soe_supported(device))
3687     {
3688         flcnablePostDiscoveryInit(device, device->pSoe);
3689     }
3690     else
3691     {
3692         NVSWITCH_PRINT(device, INFO, "%s: Skipping SOE post discovery init.\n",
3693             __FUNCTION__);
3694     }
3695 
3696     // Make sure interrupts are disabled before we enable interrupts with the OS.
3697     nvswitch_lib_disable_interrupts(device);
3698 
3699     retval = nvswitch_pri_ring_init(device);
3700     if (retval != NVL_SUCCESS)
3701     {
3702         NVSWITCH_PRINT(device, ERROR, "%s: PRI init failed\n", __FUNCTION__);
3703         goto nvswitch_initialize_device_state_exit;
3704     }
3705 
3706     NVSWITCH_PRINT(device, SETUP,
3707         "%s: Enabled links: 0x%llx\n",
3708         __FUNCTION__,
3709         ((NvU64)device->regkeys.link_enable_mask2 << 32 |
3710         (NvU64)device->regkeys.link_enable_mask) &
3711         ((~0ULL) >> (64 - NVSWITCH_LINK_COUNT(device))));
3712 
3713     if (nvswitch_is_soe_supported(device))
3714     {
3715         retval = nvswitch_init_soe(device);
3716         if (NVL_SUCCESS != retval)
3717         {
3718             NVSWITCH_PRINT(device, ERROR, "%s: Init SOE failed\n",
3719                 __FUNCTION__);
3720             goto nvswitch_initialize_device_state_exit;
3721         }
3722     }
3723     else
3724     {
3725         NVSWITCH_PRINT(device, INFO, "%s: Skipping SOE init.\n",
3726             __FUNCTION__);
3727     }
3728 
3729     // Read ROM configuration
3730     nvswitch_read_rom_tables(device, &device->firmware);
3731     _nvswitch_process_firmware_info_lr10(device, &device->firmware);
3732 
3733     // Init PMGR info
3734     retval = nvswitch_initialize_pmgr(device);
3735     if (retval != NVL_SUCCESS)
3736     {
3737         NVSWITCH_PRINT(device, ERROR,
3738             "%s: PMGR init failed\n", __FUNCTION__);
3739         retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
3740         goto nvswitch_initialize_device_state_exit;
3741     }
3742 
3743     retval = nvswitch_init_pll_config(device);
3744     if (retval != NVL_SUCCESS)
3745     {
3746         NVSWITCH_PRINT(device, ERROR,
3747             "%s: failed\n", __FUNCTION__);
3748         retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
3749         goto nvswitch_initialize_device_state_exit;
3750     }
3751 
3752     //
3753     // PLL init should be done *first* before other hardware init
3754     //
3755     retval = nvswitch_init_pll(device);
3756     if (NVL_SUCCESS != retval)
3757     {
3758         NVSWITCH_PRINT(device, ERROR,
3759             "%s: PLL init failed\n",
3760             __FUNCTION__);
3761         goto nvswitch_initialize_device_state_exit;
3762     }
3763 
3764     //
3765     // Now that software knows the devices and addresses, it must take all
3766     // the wrapper modules out of reset.  It does this by writing to the
3767     // PMC module enable registers.
3768     //
3769 
3770     // Init IP wrappers
3771 //    _nvswitch_init_mc_enable_lr10(device);
3772     retval = nvswitch_initialize_ip_wrappers(device);
3773     if (retval != NVL_SUCCESS)
3774     {
3775         NVSWITCH_PRINT(device, ERROR,
3776             "%s: init failed\n", __FUNCTION__);
3777         retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
3778         goto nvswitch_initialize_device_state_exit;
3779     }
3780 
3781     nvswitch_init_warm_reset(device);
3782     nvswitch_init_npg_multicast(device);
3783     retval = nvswitch_clear_nport_rams(device);
3784     if (NVL_SUCCESS != retval)
3785     {
3786         NVSWITCH_PRINT(device, ERROR,
3787             "%s: NPORT RAM clear failed\n",
3788             __FUNCTION__);
3789         goto nvswitch_initialize_device_state_exit;
3790     }
3791 
3792     retval = nvswitch_init_nport(device);
3793     if (retval != NVL_SUCCESS)
3794     {
3795         NVSWITCH_PRINT(device, ERROR,
3796             "%s: Init NPORTs failed\n",
3797             __FUNCTION__);
3798         goto nvswitch_initialize_device_state_exit;
3799     }
3800 
3801     retval = nvswitch_init_nxbar(device);
3802     if (retval != NVL_SUCCESS)
3803     {
3804         NVSWITCH_PRINT(device, ERROR,
3805             "%s: Init NXBARs failed\n",
3806             __FUNCTION__);
3807         goto nvswitch_initialize_device_state_exit;
3808     }
3809 
3810     if (device->regkeys.minion_disable != NV_SWITCH_REGKEY_MINION_DISABLE_YES)
3811     {
3812         NVSWITCH_PRINT(device, WARN, "%s: Entering init minion\n", __FUNCTION__);
3813 
3814         retval = nvswitch_init_minion(device);
3815         if (NVL_SUCCESS != retval)
3816         {
3817             NVSWITCH_PRINT(device, ERROR,
3818                 "%s: Init MINIONs failed\n",
3819                 __FUNCTION__);
3820             goto nvswitch_initialize_device_state_exit;
3821         }
3822     }
3823     else
3824     {
3825         NVSWITCH_PRINT(device, INFO, "MINION is disabled via regkey.\n");
3826 
3827         NVSWITCH_PRINT(device, INFO, "%s: Skipping MINION init\n",
3828             __FUNCTION__);
3829     }
3830 
3831     _nvswitch_setup_chiplib_forced_config_lr10(device);
3832 
3833     // Init route
3834     retval = nvswitch_initialize_route(device);
3835     if (retval != NVL_SUCCESS)
3836     {
3837         NVSWITCH_PRINT(device, ERROR,
3838             "%s: route init failed\n", __FUNCTION__);
3839         retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
3840         goto nvswitch_initialize_device_state_exit;
3841     }
3842 
3843     nvswitch_init_clock_gating(device);
3844 
3845     // Initialize SPI
3846     if (nvswitch_is_spi_supported(device))
3847     {
3848         retval = nvswitch_spi_init(device);
3849         if (NVL_SUCCESS != retval)
3850         {
3851             NVSWITCH_PRINT(device, ERROR,
3852                 "%s: SPI init failed!, rc: %d\n",
3853                 __FUNCTION__, retval);
3854             goto nvswitch_initialize_device_state_exit;
3855         }
3856     }
3857     else
3858     {
3859         NVSWITCH_PRINT(device, WARN,
3860             "%s: Skipping SPI init.\n",
3861             __FUNCTION__);
3862     }
3863 
3864     // Initialize SMBPBI
3865     if (nvswitch_is_smbpbi_supported(device))
3866     {
3867         retval = nvswitch_smbpbi_init(device);
3868         if (NVL_SUCCESS != retval)
3869         {
3870             NVSWITCH_PRINT(device, ERROR,
3871                 "%s: SMBPBI init failed!, rc: %d\n",
3872                 __FUNCTION__, retval);
3873             goto nvswitch_initialize_device_state_exit;
3874         }
3875     }
3876     else
3877     {
3878         NVSWITCH_PRINT(device, WARN,
3879             "%s: Skipping SMBPBI init.\n",
3880             __FUNCTION__);
3881     }
3882 
3883     nvswitch_initialize_interrupt_tree(device);
3884 
3885     // Initialize external thermal sensor
3886     retval = nvswitch_init_thermal(device);
3887     if (NVL_SUCCESS != retval)
3888     {
3889         NVSWITCH_PRINT(device, ERROR,
3890             "%s: External Thermal init failed\n",
3891             __FUNCTION__);
3892     }
3893 
3894     return NVL_SUCCESS;
3895 
3896 nvswitch_initialize_device_state_exit:
3897     nvswitch_destroy_device_state(device);
3898 
3899     return retval;
3900 }
3901 
3902 /*
3903  * @Brief : Destroys an NvSwitch hardware state
3904  *
3905  * @Description :
3906  *
 * @param[in] device        a reference to the device whose state is destroyed
3908  */
3909 void
3910 nvswitch_destroy_device_state_lr10
3911 (
3912     nvswitch_device *device
3913 )
3914 {
3915     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
3916 
3917     if (nvswitch_is_soe_supported(device))
3918     {
3919         nvswitch_soe_unregister_events(device);
3920     }
3921 
3922     if (chip_device != NULL)
3923     {
3924         if ((chip_device->latency_stats) != NULL)
3925         {
3926             nvswitch_os_free(chip_device->latency_stats);
3927         }
3928 
3929         if ((chip_device->ganged_link_table) != NULL)
3930         {
3931             nvswitch_os_free(chip_device->ganged_link_table);
3932         }
3933 
3934         nvswitch_free_chipdevice(device);
3935     }
3936 
3937     nvswitch_i2c_destroy(device);
3938 
3939     return;
3940 }
3941 
3942 static void
3943 _nvswitch_set_nvlink_caps_lr10
3944 (
3945     NvU32 *pCaps
3946 )
3947 {
3948     NvU8 tempCaps[NVSWITCH_NVLINK_CAPS_TBL_SIZE];
3949 
3950     nvswitch_os_memset(tempCaps, 0, sizeof(tempCaps));
3951 
3952     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _VALID);
3953     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SUPPORTED);
3954     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _P2P_SUPPORTED);
3955     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _P2P_ATOMICS);
3956 
3957     // Assume IBM P9 for PPC -- TODO Xavier support.
3958 #if defined(NVCPU_PPC64LE)
3959     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SYSMEM_ACCESS);
3960     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SYSMEM_ATOMICS);
3961 #endif
3962 
3963     nvswitch_os_memcpy(pCaps, tempCaps, sizeof(tempCaps));
3964 }
3965 
3966 /*
3967  * @brief Determines if a link's lanes are reversed
3968  *
3969  * @param[in] device    a reference to the device to query
3970  * @param[in] linkId    Target link ID
3971  *
3972  * @return NV_TRUE if a link's lanes are reversed
3973  */
3974 NvBool
3975 nvswitch_link_lane_reversed_lr10
3976 (
3977     nvswitch_device *device,
3978     NvU32            linkId
3979 )
3980 {
3981     NvU32 regData;
3982     nvlink_link *link;
3983 
3984     link = nvswitch_get_link(device, linkId);
3985     if ((link == NULL) || nvswitch_is_link_in_reset(device, link))
3986     {
3987         return NV_FALSE;
3988     }
3989 
3990     regData = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_RX, _CONFIG_RX);
3991 
3992     // HW may reverse the lane ordering or it may be overridden by SW.
3993     if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _REVERSAL_OVERRIDE, _ON, regData))
3994     {
3995         // Overridden
3996         if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _LANE_REVERSE, _ON, regData))
3997         {
3998             return NV_TRUE;
3999         }
4000         else
4001         {
4002             return NV_FALSE;
4003         }
4004     }
4005     else
4006     {
4007         // Sensed in HW
4008         if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _HW_LANE_REVERSE, _ON, regData))
4009         {
4010             return NV_TRUE;
4011         }
4012         else
4013         {
4014             return NV_FALSE;
4015         }
4016     }
4017 
4018     return NV_FALSE;
4019 }
4020 
4021 NvlStatus
4022 nvswitch_ctrl_get_nvlink_status_lr10
4023 (
4024     nvswitch_device *device,
4025     NVSWITCH_GET_NVLINK_STATUS_PARAMS *ret
4026 )
4027 {
4028     NvlStatus retval = NVL_SUCCESS;
4029     nvlink_link *link;
4030     NvU8 i;
4031     NvU32 linkState, txSublinkStatus, rxSublinkStatus;
4032     nvlink_conn_info conn_info = {0};
4033     NvU64 enabledLinkMask;
4034     NvU32 nvlink_caps_version;
4035 
4036     enabledLinkMask = nvswitch_get_enabled_link_mask(device);
4037     ret->enabledLinkMask = enabledLinkMask;
4038 
4039     FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask)
4040     {
4041         NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device));
4042 
4043         link = nvswitch_get_link(device, i);
4044 
4045         if ((link == NULL) ||
4046             !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
4047             (i >= NVSWITCH_NVLINK_MAX_LINKS))
4048         {
4049             continue;
4050         }
4051 
4052         //
4053         // Call the core library to get the remote end information. On the first
4054         // invocation this will also trigger link training, if link-training is
4055         // not externally managed by FM. Therefore it is necessary that this be
4056         // before link status on the link is populated since this call will
4057         // actually change link state.
4058         //
4059         if (device->regkeys.external_fabric_mgmt)
4060         {
4061             nvlink_lib_get_remote_conn_info(link, &conn_info);
4062         }
4063         else
4064         {
4065             nvlink_lib_discover_and_get_remote_conn_info(link, &conn_info, NVLINK_STATE_CHANGE_SYNC);
4066         }
4067 
4068         // Set NVLINK per-link caps
4069         _nvswitch_set_nvlink_caps_lr10(&ret->linkInfo[i].capsTbl);
4070 
4071         ret->linkInfo[i].phyType = NVSWITCH_NVLINK_STATUS_PHY_NVHS;
4072         ret->linkInfo[i].subLinkWidth = nvswitch_get_sublink_width(device, link->linkNumber);
4073 
4074         if (!nvswitch_is_link_in_reset(device, link))
4075         {
4076             linkState = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_STATE);
4077             linkState = DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, linkState);
4078 
4079             txSublinkStatus = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _SLSM_STATUS_TX);
4080             txSublinkStatus = DRF_VAL(_NVLDL_TX, _SLSM_STATUS_TX, _PRIMARY_STATE, txSublinkStatus);
4081 
4082             rxSublinkStatus = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _SLSM_STATUS_RX);
4083             rxSublinkStatus = DRF_VAL(_NVLDL_RX, _SLSM_STATUS_RX, _PRIMARY_STATE, rxSublinkStatus);
4084 
4085             ret->linkInfo[i].bLaneReversal = nvswitch_link_lane_reversed_lr10(device, i);
4086         }
4087         else
4088         {
4089             linkState       = NVSWITCH_NVLINK_STATUS_LINK_STATE_INIT;
4090             txSublinkStatus = NVSWITCH_NVLINK_STATUS_SUBLINK_TX_STATE_OFF;
4091             rxSublinkStatus = NVSWITCH_NVLINK_STATUS_SUBLINK_RX_STATE_OFF;
4092         }
4093 
4094         ret->linkInfo[i].linkState       = linkState;
4095         ret->linkInfo[i].txSublinkStatus = txSublinkStatus;
4096         ret->linkInfo[i].rxSublinkStatus = rxSublinkStatus;
4097 
4098         nvlink_caps_version = nvswitch_get_caps_nvlink_version(device);
4099         if (nvlink_caps_version == NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0)
4100         {
4101             ret->linkInfo[i].nvlinkVersion = NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_3_0;
4102             ret->linkInfo[i].nciVersion = NVSWITCH_NVLINK_STATUS_NCI_VERSION_3_0;
4103         }
4104         else if (nvlink_caps_version == NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_4_0)
4105         {
4106             ret->linkInfo[i].nvlinkVersion = NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_4_0;
4107             ret->linkInfo[i].nciVersion = NVSWITCH_NVLINK_STATUS_NCI_VERSION_4_0;
4108         }
4109         else
4110         {
4111             NVSWITCH_PRINT(device, WARN,
4112                 "%s WARNING: Unknown NVSWITCH_NVLINK_CAPS_NVLINK_VERSION 0x%x\n",
4113                 __FUNCTION__, nvlink_caps_version);
4114             ret->linkInfo[i].nvlinkVersion = NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_INVALID;
4115             ret->linkInfo[i].nciVersion = NVSWITCH_NVLINK_STATUS_NCI_VERSION_INVALID;
4116         }
4117 
4118         ret->linkInfo[i].phyVersion = NVSWITCH_NVLINK_STATUS_NVHS_VERSION_1_0;
4119 
4120         if (conn_info.bConnected)
4121         {
4122             ret->linkInfo[i].connected = NVSWITCH_NVLINK_STATUS_CONNECTED_TRUE;
4123             ret->linkInfo[i].remoteDeviceLinkNumber = (NvU8)conn_info.linkNumber;
4124 
4125             ret->linkInfo[i].remoteDeviceInfo.domain = conn_info.domain;
4126             ret->linkInfo[i].remoteDeviceInfo.bus = conn_info.bus;
4127             ret->linkInfo[i].remoteDeviceInfo.device = conn_info.device;
4128             ret->linkInfo[i].remoteDeviceInfo.function = conn_info.function;
4129             ret->linkInfo[i].remoteDeviceInfo.pciDeviceId = conn_info.pciDeviceId;
4130             ret->linkInfo[i].remoteDeviceInfo.deviceType = conn_info.deviceType;
4131 
4132             ret->linkInfo[i].localLinkSid  = link->localSid;
4133             ret->linkInfo[i].remoteLinkSid = link->remoteSid;
4134 
4135             if (0 != conn_info.pciDeviceId)
4136             {
4137                 ret->linkInfo[i].remoteDeviceInfo.deviceIdFlags =
4138                     FLD_SET_DRF(SWITCH_NVLINK, _DEVICE_INFO, _DEVICE_ID_FLAGS,
4139                          _PCI, ret->linkInfo[i].remoteDeviceInfo.deviceIdFlags);
4140             }
4141 
4142             // Does not use loopback
4143             ret->linkInfo[i].loopProperty =
4144                 NVSWITCH_NVLINK_STATUS_LOOP_PROPERTY_NONE;
4145         }
4146         else
4147         {
4148             ret->linkInfo[i].connected =
4149                 NVSWITCH_NVLINK_STATUS_CONNECTED_FALSE;
4150             ret->linkInfo[i].remoteDeviceInfo.deviceType =
4151                 NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_NONE;
4152         }
4153 
4154         // Set the device information for the local end of the link
4155         ret->linkInfo[i].localDeviceInfo.domain = device->nvlink_device->pciInfo.domain;
4156         ret->linkInfo[i].localDeviceInfo.bus = device->nvlink_device->pciInfo.bus;
4157         ret->linkInfo[i].localDeviceInfo.device = device->nvlink_device->pciInfo.device;
4158         ret->linkInfo[i].localDeviceInfo.function = device->nvlink_device->pciInfo.function;
4159         ret->linkInfo[i].localDeviceInfo.pciDeviceId = 0xdeadbeef; // TODO
4160         ret->linkInfo[i].localDeviceLinkNumber = i;
4161         ret->linkInfo[i].laneRxdetStatusMask = device->link[i].lane_rxdet_status_mask;
4162         ret->linkInfo[i].localDeviceInfo.deviceType =
4163             NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_SWITCH;
4164 
4165         // Clock data
4166         ret->linkInfo[i].nvlinkLineRateMbps = nvswitch_minion_get_line_rate_Mbps_lr10(device, i);
4167         ret->linkInfo[i].nvlinkLinkDataRateKiBps = nvswitch_minion_get_data_rate_KiBps_lr10(device, i);
4168         ret->linkInfo[i].nvlinkLinkClockMhz = ret->linkInfo[i].nvlinkLineRateMbps / 32;
4169         ret->linkInfo[i].nvlinkRefClkSpeedMhz = 156;
4170         ret->linkInfo[i].nvlinkRefClkType = NVSWITCH_NVLINK_REFCLK_TYPE_NVHS;
4171 
4172     }
4173     FOR_EACH_INDEX_IN_MASK_END;
4174 
4175 //    NVSWITCH_ASSERT(ret->enabledLinkMask == enabledLinkMask);
4176 
4177     return retval;
4178 }
4179 
//
// CTRL_NVSWITCH_GET_COUNTERS
//
// Reads the NVLink counters selected by ret->counterMask for the link in
// ret->linkId. TL throughput counters come from NVLTLC debug registers;
// DL error counters come from MINION DL-status queries when MINION is
// initialized, otherwise they are reported as 0.
//
// @param[in]  device  the device to query
// @param[out] ret     linkId/counterMask in; counter values and TL
//                     overflow flags out
//
// @returns NVL_SUCCESS, -NVL_BAD_ARGS on an invalid link, or the error
//          status of a failed MINION DL-status query.
//
NvlStatus
nvswitch_ctrl_get_counters_lr10
(
    nvswitch_device *device,
    NVSWITCH_NVLINK_GET_COUNTERS_PARAMS *ret
)
{
    nvlink_link *link;
    NvU8   i;
    NvU32  counterMask;
    NvU32  data;
    NvU32  val;
    NvU64  tx0TlCount;
    NvU64  tx1TlCount;
    NvU64  rx0TlCount;
    NvU64  rx1TlCount;
    NvU32  laneId;
    NvBool bLaneReversed;
    NvlStatus status;
    NvBool minion_enabled;

    // The per-lane loop below must fit within the lane-CRC counter table.
    ct_assert(NVSWITCH_NUM_LANES_LR10 <= NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE__SIZE);

    link = nvswitch_get_link(device, ret->linkId);
    if ((link == NULL) ||
        !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber))
    {
        return -NVL_BAD_ARGS;
    }

    minion_enabled = nvswitch_is_minion_initialized(device, NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION));

    counterMask = ret->counterMask;

    // Common usage allows one of these to stand for all of them
    if (counterMask & (NVSWITCH_NVLINK_COUNTER_TL_TX0 |
                       NVSWITCH_NVLINK_COUNTER_TL_TX1 |
                       NVSWITCH_NVLINK_COUNTER_TL_RX0 |
                       NVSWITCH_NVLINK_COUNTER_TL_RX1))
    {
        // Bit 63 of each 64-bit TL counter is the HW overflow flag; report
        // it separately and mask it out of the returned value.
        tx0TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(0)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(0)));
        if (NVBIT64(63) & tx0TlCount)
        {
            ret->bTx0TlCounterOverflow = NV_TRUE;
            tx0TlCount &= ~(NVBIT64(63));
        }

        tx1TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(1)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(1)));
        if (NVBIT64(63) & tx1TlCount)
        {
            ret->bTx1TlCounterOverflow = NV_TRUE;
            tx1TlCount &= ~(NVBIT64(63));
        }

        rx0TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(0)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(0)));
        if (NVBIT64(63) & rx0TlCount)
        {
            ret->bRx0TlCounterOverflow = NV_TRUE;
            rx0TlCount &= ~(NVBIT64(63));
        }

        rx1TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(1)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(1)));
        if (NVBIT64(63) & rx1TlCount)
        {
            ret->bRx1TlCounterOverflow = NV_TRUE;
            rx1TlCount &= ~(NVBIT64(63));
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_TX0)] = tx0TlCount;
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_TX1)] = tx1TlCount;
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_RX0)] = rx0TlCount;
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_RX1)] = rx1TlCount;
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_RX01, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _RX01, _FLIT_CRC_ERRORS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT)]
            = data;
    }

    data = 0x0;
    bLaneReversed = nvswitch_link_lane_reversed_lr10(device, link->linkNumber);

    // NOTE(review): NV_NVLSTAT_DB01 is re-queried for every lane even
    // though the same word holds all lane counts, and regardless of
    // whether that lane's bit is set in counterMask -- presumably benign;
    // confirm before changing.
    for (laneId = 0; laneId < NVSWITCH_NUM_LANES_LR10; laneId++)
    {
        //
        // HW may reverse the lane ordering or it may be overridden by SW.
        // If so, invert the interpretation of the lane CRC errors.
        //
        i = (NvU8)((bLaneReversed) ? (NVSWITCH_NUM_LANES_LR10 - 1) - laneId : laneId);

        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_DB01, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(laneId))
        {
            val = BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(laneId));

            // Select the (possibly reversed) physical lane's CRC field.
            switch (i)
            {
                case 0:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L0, data);
                    break;
                case 1:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L1, data);
                    break;
                case 2:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L2, data);
                    break;
                case 3:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L3, data);
                    break;
            }
        }
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_TX09, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _TX09, _REPLAY_EVENTS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY)]
            = data;
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_LNK1, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _LNK1, _ERROR_COUNT1_RECOVERY_EVENTS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)]
            = data;
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_REPLAY)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_RX00, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _RX00, _REPLAY_EVENTS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_REPLAY)]
            = data;
    }

    // PHY refresh counters are not tracked on LR10; report zero.
    if (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS)
    {
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS)] = 0;
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL)
    {
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL)] = 0;
    }

    return NVL_SUCCESS;
}
4414 
4415 static void
4416 nvswitch_ctrl_clear_throughput_counters_lr10
4417 (
4418     nvswitch_device *device,
4419     nvlink_link     *link,
4420     NvU32            counterMask
4421 )
4422 {
4423     NvU32 data;
4424 
4425     // TX
4426     data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL);
4427     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_TX0)
4428     {
4429         data = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETTX0, 0x1, data);
4430     }
4431     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_TX1)
4432     {
4433         data = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETTX1, 0x1, data);
4434     }
4435     NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, data);
4436 
4437     // RX
4438     data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL);
4439     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_RX0)
4440     {
4441         data = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETRX0, 0x1, data);
4442     }
4443     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_RX1)
4444     {
4445         data = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETRX1, 0x1, data);
4446     }
4447     NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, data);
4448 }
4449 
4450 static NvlStatus
4451 nvswitch_ctrl_clear_dl_error_counters_lr10
4452 (
4453     nvswitch_device *device,
4454     nvlink_link     *link,
4455     NvU32            counterMask
4456 )
4457 {
4458     NvU32           data;
4459 
4460     if ((!counterMask) ||
4461         (!(counterMask & (NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0 |
4462                           NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1 |
4463                           NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2 |
4464                           NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3 |
4465                           NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4 |
4466                           NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5 |
4467                           NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6 |
4468                           NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7 |
4469                           NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS |
4470                           NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY |
4471                           NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY))))
4472     {
4473         NVSWITCH_PRINT(device, INFO,
4474             "%s: Link%d: No error count clear request, counterMask (0x%x). Returning!\n",
4475             __FUNCTION__, link->linkNumber, counterMask);
4476         return NVL_SUCCESS;
4477     }
4478 
4479     // With Minion initialized, send command to minion
4480     if (nvswitch_is_minion_initialized(device, NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION)))
4481     {
4482         return nvswitch_minion_clear_dl_error_counters_lr10(device, link->linkNumber);
4483     }
4484 
4485     // With Minion not-initialized, perform with the registers
4486     if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT)
4487     {
4488         data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL);
4489         data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_FLIT_CRC, _CLEAR, data);
4490         data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_RATES, _CLEAR, data);
4491         NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL, data);
4492     }
4493 
4494     if (counterMask & (NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0 |
4495                NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1 |
4496                NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2 |
4497                NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3 |
4498                NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4 |
4499                NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5 |
4500                NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6 |
4501                NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7 |
4502                NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS))
4503     {
4504         data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL);
4505         data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_LANE_CRC, _CLEAR, data);
4506         data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_RATES, _CLEAR, data);
4507         if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS)
4508         {
4509             data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_ECC_COUNTS, _CLEAR, data);
4510         }
4511         NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL, data);
4512     }
4513 
4514     if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY)
4515     {
4516         data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _ERROR_COUNT_CTRL);
4517         data = FLD_SET_DRF(_NVLDL_TX, _ERROR_COUNT_CTRL, _CLEAR_REPLAY, _CLEAR, data);
4518         NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _ERROR_COUNT_CTRL, data);
4519     }
4520 
4521     if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)
4522     {
4523         data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _ERROR_COUNT_CTRL);
4524         data = FLD_SET_DRF(_NVLDL_TOP, _ERROR_COUNT_CTRL, _CLEAR_RECOVERY, _CLEAR, data);
4525         NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _ERROR_COUNT_CTRL, data);
4526     }
4527     return NVL_SUCCESS;
4528 }
4529 
4530 /*
4531  * CTRL_NVSWITCH_GET_INFO
4532  *
4533  * Query for miscellaneous information analogous to NV2080_CTRL_GPU_INFO
4534  * This provides a single API to query for multiple pieces of miscellaneous
4535  * information via a single call.
4536  *
4537  */
4538 
4539 static NvU32
4540 _nvswitch_get_info_chip_id
4541 (
4542     nvswitch_device *device
4543 )
4544 {
4545     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4546 
4547     return (DRF_VAL(_PSMC, _BOOT_42, _CHIP_ID, val));
4548 }
4549 
4550 static NvU32
4551 _nvswitch_get_info_revision_major
4552 (
4553     nvswitch_device *device
4554 )
4555 {
4556     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4557 
4558     return (DRF_VAL(_PSMC, _BOOT_42, _MAJOR_REVISION, val));
4559 }
4560 
4561 static NvU32
4562 _nvswitch_get_info_revision_minor
4563 (
4564     nvswitch_device *device
4565 )
4566 {
4567     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4568 
4569     return (DRF_VAL(_PSMC, _BOOT_42, _MINOR_REVISION, val));
4570 }
4571 
4572 static NvU32
4573 _nvswitch_get_info_revision_minor_ext
4574 (
4575     nvswitch_device *device
4576 )
4577 {
4578     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4579 
4580     return (DRF_VAL(_PSMC, _BOOT_42, _MINOR_EXTENDED_REVISION, val));
4581 }
4582 
// InfoROM BBX support query: always false for this implementation.
static NvBool
_nvswitch_inforom_bbx_supported
(
    nvswitch_device *device
)
{
    return NV_FALSE;
}
4591 
4592 /*
4593  * CTRL_NVSWITCH_GET_INFO
4594  *
4595  * Query for miscellaneous information analogous to NV2080_CTRL_GPU_INFO
4596  * This provides a single API to query for multiple pieces of miscellaneous
4597  * information via a single call.
4598  *
4599  */
4600 
/*
 * @brief Handler for CTRL_NVSWITCH_GET_INFO on LR10.
 *
 * Walks the caller-supplied index list and fills p->info[i] with the
 * value corresponding to each recognized NVSWITCH_GET_INFO_INDEX_*.
 * An unrecognized index is logged and causes -NVL_BAD_ARGS to be
 * returned, but the remaining entries are still processed.
 *
 * @param[in]     device  nvswitch device
 * @param[in,out] p       query indices on input, info values on output
 *
 * @returns NVL_SUCCESS if all indices were serviced,
 *          -NVL_BAD_ARGS on an out-of-range count or unknown index,
 *          -NVL_ERR_NOT_SUPPORTED if an index is not supported on LR10
 */
NvlStatus
nvswitch_ctrl_get_info_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_INFO *p
)
{
    NvlStatus retval = NVL_SUCCESS;
    NvU32 i;

    if (p->count > NVSWITCH_GET_INFO_COUNT_MAX)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Invalid args\n",
            __FUNCTION__);
        return -NVL_BAD_ARGS;
    }

    // Zero the entire output array so entries beyond p->count read back as 0.
    nvswitch_os_memset(p->info, 0, sizeof(NvU32)*NVSWITCH_GET_INFO_COUNT_MAX);

    for (i = 0; i < p->count; i++)
    {
        switch (p->index[i])
        {
            case NVSWITCH_GET_INFO_INDEX_ARCH:
                p->info[i] = device->chip_arch;
                break;
            case NVSWITCH_GET_INFO_INDEX_PLATFORM:
                // Report the most specific platform first; silicon is the default.
                if (IS_RTLSIM(device))
                {
                    p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_RTLSIM;
                }
                else if (IS_FMODEL(device))
                {
                    p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_FMODEL;
                }
                else if (IS_EMULATION(device))
                {
                    p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_EMULATION;
                }
                else
                {
                    p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_SILICON;
                }
                break;
            case NVSWITCH_GET_INFO_INDEX_IMPL:
                p->info[i] = device->chip_impl;
                break;
            case NVSWITCH_GET_INFO_INDEX_CHIPID:
                p->info[i] = _nvswitch_get_info_chip_id(device);
                break;
            case NVSWITCH_GET_INFO_INDEX_REVISION_MAJOR:
                p->info[i] = _nvswitch_get_info_revision_major(device);
                break;
            case NVSWITCH_GET_INFO_INDEX_REVISION_MINOR:
                p->info[i] = _nvswitch_get_info_revision_minor(device);
                break;
            case NVSWITCH_GET_INFO_INDEX_REVISION_MINOR_EXT:
                p->info[i] = _nvswitch_get_info_revision_minor_ext(device);
                break;
            case NVSWITCH_GET_INFO_INDEX_DEVICE_ID:
                p->info[i] = device->nvlink_device->pciInfo.pciDeviceId;
                break;
            case NVSWITCH_GET_INFO_INDEX_NUM_PORTS:
                p->info[i] = NVSWITCH_LINK_COUNT(device);
                break;
            // The 64-bit enabled-link mask is returned as two 32-bit halves.
            case NVSWITCH_GET_INFO_INDEX_ENABLED_PORTS_MASK_31_0:
                p->info[i] = NvU64_LO32(nvswitch_get_enabled_link_mask(device));
                break;
            case NVSWITCH_GET_INFO_INDEX_ENABLED_PORTS_MASK_63_32:
                p->info[i] = NvU64_HI32(nvswitch_get_enabled_link_mask(device));
                break;
            case NVSWITCH_GET_INFO_INDEX_NUM_VCS:
                p->info[i] = _nvswitch_get_num_vcs_lr10(device);
                break;
            case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_TABLE_SIZE:
                {
                    NvU32 remap_ram_sel;
                    NvlStatus status;

                    // Table size is 0 when the selector lookup fails.
                    status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_PRIMARY, &remap_ram_sel);
                    if (status == NVL_SUCCESS)
                    {
                        p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
                    }
                    else
                    {
                        p->info[i] = 0;
                    }
                }
                break;
            case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_EXTA_TABLE_SIZE:
                {
                    NvU32 remap_ram_sel;
                    NvlStatus status;

                    status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_EXTA, &remap_ram_sel);
                    if (status == NVL_SUCCESS)
                    {
                        p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
                    }
                    else
                    {
                        p->info[i] = 0;
                    }
                }
                break;
            case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_EXTB_TABLE_SIZE:
                {
                    NvU32 remap_ram_sel;
                    NvlStatus status;

                    status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_EXTB, &remap_ram_sel);
                    if (status == NVL_SUCCESS)
                    {
                        p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
                    }
                    else
                    {
                        p->info[i] = 0;
                    }
                }
                break;
            case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_MULTICAST_TABLE_SIZE:
                {
                    NvU32 remap_ram_sel;
                    NvlStatus status;

                    status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_MULTICAST, &remap_ram_sel);
                    if (status == NVL_SUCCESS)
                    {
                        p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
                    }
                    else
                    {
                        p->info[i] = 0;
                    }
                }
                break;
            case NVSWITCH_GET_INFO_INDEX_ROUTING_ID_TABLE_SIZE:
                p->info[i] = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
                break;
            case NVSWITCH_GET_INFO_INDEX_ROUTING_LAN_TABLE_SIZE:
                p->info[i] = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
                break;
            case NVSWITCH_GET_INFO_INDEX_FREQ_KHZ:
                p->info[i] = device->switch_pll.freq_khz;
                break;
            case NVSWITCH_GET_INFO_INDEX_VCOFREQ_KHZ:
                p->info[i] = device->switch_pll.vco_freq_khz;
                break;
            case NVSWITCH_GET_INFO_INDEX_VOLTAGE_MVOLT:
                // Voltage reporting is not supported on LR10.
                retval = -NVL_ERR_NOT_SUPPORTED;
                break;
            case NVSWITCH_GET_INFO_INDEX_PHYSICAL_ID:
                p->info[i] = nvswitch_read_physical_id(device);
                break;
            case NVSWITCH_GET_INFO_INDEX_PCI_DOMAIN:
                p->info[i] = device->nvlink_device->pciInfo.domain;
                break;
            case NVSWITCH_GET_INFO_INDEX_PCI_BUS:
                p->info[i] = device->nvlink_device->pciInfo.bus;
                break;
            case NVSWITCH_GET_INFO_INDEX_PCI_DEVICE:
                p->info[i] = device->nvlink_device->pciInfo.device;
                break;
            case NVSWITCH_GET_INFO_INDEX_PCI_FUNCTION:
                p->info[i] = device->nvlink_device->pciInfo.function;
                break;
            default:
                // Record the failure but keep servicing the remaining indices.
                NVSWITCH_PRINT(device, ERROR,
                    "%s: Undefined NVSWITCH_GET_INFO_INDEX 0x%x\n",
                    __FUNCTION__,
                    p->index[i]);
                retval = -NVL_BAD_ARGS;
                break;
        }
    }

    return retval;
}
4782 
/*
 * @brief Program the NPORT port configuration for LR10.
 *
 * Validates the requester link/LAN IDs against the _NPORT_REQLINKID
 * register field widths, selects access vs. trunk link type and the
 * endpoint count in _NPORT_CTRL, then writes the requester routing IDs.
 *
 * @param[in] device  nvswitch device
 * @param[in] p       port configuration (portNum, type, count,
 *                    requesterLinkID, requesterLanID)
 *
 * @returns NVL_SUCCESS, or -NVL_BAD_ARGS if an ID exceeds its register
 *          field or the type/count enum is unrecognized
 */
NvlStatus
nvswitch_set_nport_port_config_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_SWITCH_PORT_CONFIG *p
)
{
    NvU32   val;

    // IDs must fit in their register fields; DRF_MASK gives the max value.
    if (p->requesterLinkID > DRF_MASK(NV_NPORT_REQLINKID_REQROUTINGID))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Invalid requester RID 0x%x\n",
            __FUNCTION__, p->requesterLinkID);
        return -NVL_BAD_ARGS;
    }

    if (p->requesterLanID > DRF_MASK(NV_NPORT_REQLINKID_REQROUTINGLAN))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Invalid requester RLAN 0x%x\n",
            __FUNCTION__, p->requesterLanID);
        return -NVL_BAD_ARGS;
    }

    // Read-modify-write _NPORT_CTRL: link type first, then endpoint count.
    val = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _NPORT, _CTRL);
    switch (p->type)
    {
        case CONNECT_ACCESS_GPU:
        case CONNECT_ACCESS_CPU:
        case CONNECT_ACCESS_SWITCH:
            val = FLD_SET_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _ACCESSLINK, val);
            break;
        case CONNECT_TRUNK_SWITCH:
            val = FLD_SET_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _TRUNKLINK, val);
            break;
        default:
            NVSWITCH_PRINT(device, ERROR,
                "%s: invalid type #%d\n",
                __FUNCTION__, p->type);
            return -NVL_BAD_ARGS;
    }

    switch(p->count)
    {
        case CONNECT_COUNT_512:
            val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _512, val);
            break;
        case CONNECT_COUNT_1024:
            val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _1024, val);
            break;
        case CONNECT_COUNT_2048:
            val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _2048, val);
            break;
        default:
            NVSWITCH_PRINT(device, ERROR,
                "%s: invalid count #%d\n",
                __FUNCTION__, p->count);
            return -NVL_BAD_ARGS;
    }
    // Commit _NPORT_CTRL only after both fields validated successfully.
    NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _CTRL, val);

    NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _REQLINKID,
        DRF_NUM(_NPORT, _REQLINKID, _REQROUTINGID, p->requesterLinkID) |
        DRF_NUM(_NPORT, _REQLINKID, _REQROUTINGLAN, p->requesterLanID));

    return NVL_SUCCESS;
}
4851 
/*
 * @brief Handler for CTRL_NVSWITCH_SET_SWITCH_PORT_CONFIG on LR10.
 *
 * Validates the request, programs the chip-specific NPORT port config,
 * records the AC/DC coupling on the link, and programs the AC-mode
 * system register when AC-coupled.
 *
 * @param[in] device  nvswitch device
 * @param[in] p       port configuration request
 *
 * @returns NVL_SUCCESS on success
 *          -NVL_BAD_ARGS on an invalid port, VC1 on a non-trunk link,
 *              or a disallowed AC/DC change
 *          -NVL_ERR_INVALID_STATE if the link object cannot be found
 */
NvlStatus
nvswitch_ctrl_set_switch_port_config_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_SWITCH_PORT_CONFIG *p
)
{
    nvlink_link *link;
    NvU32 val;
    NvlStatus status;

    if (!NVSWITCH_IS_LINK_ENG_VALID(device, p->portNum, NPORT))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: invalid link #%d\n",
            __FUNCTION__, p->portNum);
        return -NVL_BAD_ARGS;
    }

    // VC1 is restricted to trunk (switch-to-switch) links.
    if (p->enableVC1 && (p->type != CONNECT_TRUNK_SWITCH))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: VC1 only allowed on trunk links\n",
            __FUNCTION__);
        return -NVL_BAD_ARGS;
    }

    // Validate chip-specific NPORT settings and program port config settings.
    status = nvswitch_set_nport_port_config(device, p);
    if (status != NVL_SUCCESS)
    {
        return status;
    }

    link = nvswitch_get_link(device, (NvU8)p->portNum);
    if (link == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: invalid link\n",
            __FUNCTION__);
        return -NVL_ERR_INVALID_STATE;
    }

    //
    // If ac_coupled_mask is configured during nvswitch_create_link,
    // give preference to it.
    //
    // When coupling was pre-configured (regkey or firmware), the request
    // may not change it; only confirmatory requests are accepted.
    //
    if (device->regkeys.ac_coupled_mask  ||
        device->regkeys.ac_coupled_mask2 ||
        device->firmware.nvlink.link_ac_coupled_mask)
    {
        if (link->ac_coupled != p->acCoupled)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: port[%d]: Unsupported AC coupled change (%s)\n",
                __FUNCTION__, p->portNum, p->acCoupled ? "AC" : "DC");
            return -NVL_BAD_ARGS;
        }
    }

    link->ac_coupled = p->acCoupled;

    // AC vs DC mode SYSTEM register
    if (link->ac_coupled)
    {
        //
        // In NVL3.0, ACMODE is handled by MINION in the INITPHASE1 command
        // Here we just setup the register with the proper info
        //
        val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK,
                _NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL);
        val = FLD_SET_DRF(_NVLIPT_LNK,
                _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _AC_DC_MODE, _AC, val);
        NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK,
                _NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, val);
    }

    // If _BUFFER_RDY is asserted, credits are locked.
    // Nothing further to program in that case; report it and succeed.
    val = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _NPORT, _CTRL_BUFFER_READY);
    if (FLD_TEST_DRF(_NPORT, _CTRL_BUFFER_READY, _BUFFERRDY, _ENABLE, val))
    {
        NVSWITCH_PRINT(device, SETUP,
            "%s: port[%d]: BUFFERRDY already enabled.\n",
            __FUNCTION__, p->portNum);
        return NVL_SUCCESS;
    }

    return NVL_SUCCESS;
}
4941 
4942 NvlStatus
4943 nvswitch_ctrl_set_ingress_request_table_lr10
4944 (
4945     nvswitch_device *device,
4946     NVSWITCH_SET_INGRESS_REQUEST_TABLE *p
4947 )
4948 {
4949     return -NVL_ERR_NOT_SUPPORTED;
4950 }
4951 
4952 NvlStatus
4953 nvswitch_ctrl_get_ingress_request_table_lr10
4954 (
4955     nvswitch_device *device,
4956     NVSWITCH_GET_INGRESS_REQUEST_TABLE_PARAMS *params
4957 )
4958 {
4959     return -NVL_ERR_NOT_SUPPORTED;
4960 }
4961 
4962 NvlStatus
4963 nvswitch_ctrl_set_ingress_request_valid_lr10
4964 (
4965     nvswitch_device *device,
4966     NVSWITCH_SET_INGRESS_REQUEST_VALID *p
4967 )
4968 {
4969     return -NVL_ERR_NOT_SUPPORTED;
4970 }
4971 
4972 NvlStatus
4973 nvswitch_ctrl_get_ingress_response_table_lr10
4974 (
4975     nvswitch_device *device,
4976     NVSWITCH_GET_INGRESS_RESPONSE_TABLE_PARAMS *params
4977 )
4978 {
4979     return -NVL_ERR_NOT_SUPPORTED;
4980 }
4981 
4982 
4983 NvlStatus
4984 nvswitch_ctrl_set_ingress_response_table_lr10
4985 (
4986     nvswitch_device *device,
4987     NVSWITCH_SET_INGRESS_RESPONSE_TABLE *p
4988 )
4989 {
4990     return -NVL_ERR_NOT_SUPPORTED;
4991 }
4992 
4993 static NvlStatus
4994 nvswitch_ctrl_set_ganged_link_table_lr10
4995 (
4996     nvswitch_device *device,
4997     NVSWITCH_SET_GANGED_LINK_TABLE *p
4998 )
4999 {
5000     return -NVL_ERR_NOT_SUPPORTED;
5001 }
5002 
/*
 * @brief Snapshot and reset the internal latency counters for one VC.
 *
 * Copies the software-accumulated per-port latency histograms for the
 * requested VC into pLatency, reports the elapsed time of the sample
 * window, then resets the accumulators so a new window begins at the
 * next call.  Ports without a valid NPORT engine are skipped (their
 * output entries remain zero from the memset below).
 *
 * @param[in]     device    nvswitch device
 * @param[in,out] pLatency  vc_selector on input; histograms and
 *                          elapsed_time_msec on output
 *
 * @returns NVL_SUCCESS, or -NVL_BAD_ARGS for an out-of-range VC
 */
static NvlStatus
nvswitch_ctrl_get_internal_latency_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_INTERNAL_LATENCY *pLatency
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32 vc_selector = pLatency->vc_selector;
    NvU32 idx_nport;

    // Validate VC selector
    if (vc_selector >= NVSWITCH_NUM_VCS_LR10)
    {
        return -NVL_BAD_ARGS;
    }

    // Clear the whole output struct, then restore the caller's selector.
    nvswitch_os_memset(pLatency, 0, sizeof(*pLatency));
    pLatency->vc_selector = vc_selector;

    for (idx_nport=0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++)
    {
        if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, idx_nport))
        {
            continue;
        }

        pLatency->egressHistogram[idx_nport].low =
            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].low;
        pLatency->egressHistogram[idx_nport].medium =
            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].medium;
        pLatency->egressHistogram[idx_nport].high =
           chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].high;
        pLatency->egressHistogram[idx_nport].panic =
           chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].panic;
        pLatency->egressHistogram[idx_nport].count =
           chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].count;
    }

    // Window length = previous reset point to the most recent counter read.
    // NOTE(review): last_read_time_nsec is maintained by the sampling path,
    // not visible in this function.
    pLatency->elapsed_time_msec =
      (chip_device->latency_stats->latency[vc_selector].last_read_time_nsec -
       chip_device->latency_stats->latency[vc_selector].start_time_nsec)/1000000ULL;

    // Start the next measurement window at the last read time.
    chip_device->latency_stats->latency[vc_selector].start_time_nsec =
        chip_device->latency_stats->latency[vc_selector].last_read_time_nsec;

    chip_device->latency_stats->latency[vc_selector].count = 0;

    // Clear accum_latency[]
    for (idx_nport = 0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++)
    {
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].low = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].medium = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].high = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].panic = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].count = 0;
    }

    return NVL_SUCCESS;
}
5063 
5064 NvlStatus
5065 nvswitch_ctrl_set_latency_bins_lr10
5066 (
5067     nvswitch_device *device,
5068     NVSWITCH_SET_LATENCY_BINS *pLatency
5069 )
5070 {
5071     NvU32 vc_selector;
5072     const NvU32 freq_mhz = 1330;
5073     const NvU32 switchpll_hz = freq_mhz * 1000000ULL; // TODO: Update this with device->switch_pll.freq_khz after LR10 PLL update
5074     const NvU32 min_threshold = 10;   // Must be > zero to avoid div by zero
5075     const NvU32 max_threshold = 10000;
5076 
5077     // Quick input validation and ns to register value conversion
5078     for (vc_selector = 0; vc_selector < NVSWITCH_NUM_VCS_LR10; vc_selector++)
5079     {
5080         if ((pLatency->bin[vc_selector].lowThreshold > max_threshold)                           ||
5081             (pLatency->bin[vc_selector].lowThreshold < min_threshold)                           ||
5082             (pLatency->bin[vc_selector].medThreshold > max_threshold)                           ||
5083             (pLatency->bin[vc_selector].medThreshold < min_threshold)                           ||
5084             (pLatency->bin[vc_selector].hiThreshold  > max_threshold)                           ||
5085             (pLatency->bin[vc_selector].hiThreshold  < min_threshold)                           ||
5086             (pLatency->bin[vc_selector].lowThreshold > pLatency->bin[vc_selector].medThreshold) ||
5087             (pLatency->bin[vc_selector].medThreshold > pLatency->bin[vc_selector].hiThreshold))
5088         {
5089             return -NVL_BAD_ARGS;
5090         }
5091 
5092         pLatency->bin[vc_selector].lowThreshold =
5093             switchpll_hz / (1000000000 / pLatency->bin[vc_selector].lowThreshold);
5094         pLatency->bin[vc_selector].medThreshold =
5095             switchpll_hz / (1000000000 / pLatency->bin[vc_selector].medThreshold);
5096         pLatency->bin[vc_selector].hiThreshold =
5097             switchpll_hz / (1000000000 / pLatency->bin[vc_selector].hiThreshold);
5098 
5099         NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _LOW,    vc_selector, pLatency->bin[vc_selector].lowThreshold);
5100         NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _MEDIUM, vc_selector, pLatency->bin[vc_selector].medThreshold);
5101         NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _HIGH,   vc_selector, pLatency->bin[vc_selector].hiThreshold);
5102     }
5103 
5104     return NVL_SUCCESS;
5105 }
5106 
5107 #define NV_NPORT_REQLINKID_REQROUTINGLAN_1024  18:18
5108 #define NV_NPORT_REQLINKID_REQROUTINGLAN_2048  18:17
5109 
5110 /*
5111  * @brief Returns the ingress requester link id.
5112  *
5113  * On LR10, REQROUTINGID only gives the endpoint but not the specific port of the response packet.
5114  * To identify the specific port, the routing_ID must be appended with the upper bits of REQROUTINGLAN.
5115  *
5116  * When NV_NPORT_CTRL_ENDPOINT_COUNT = 1024, the upper bit of NV_NPORT_REQLINKID_REQROUTINGLAN become REQROUTINGID[9].
5117  * When NV_NPORT_CTRL_ENDPOINT_COUNT = 2048, the upper two bits of NV_NPORT_REQLINKID_REQROUTINGLAN become REQROUTINGID[10:9].
5118  *
5119  * @param[in] device            nvswitch device
5120  * @param[in] params            NVSWITCH_GET_INGRESS_REQLINKID_PARAMS
5121  *
5122  * @returns                     NVL_SUCCESS if action succeeded,
5123  *                              -NVL_ERR_INVALID_STATE invalid link
5124  */
5125 NvlStatus
5126 nvswitch_ctrl_get_ingress_reqlinkid_lr10
5127 (
5128     nvswitch_device *device,
5129     NVSWITCH_GET_INGRESS_REQLINKID_PARAMS *params
5130 )
5131 {
5132     NvU32 regval;
5133     NvU32 reqRid;
5134     NvU32 reqRlan;
5135     NvU32 rlan_shift = DRF_SHIFT_RT(NV_NPORT_REQLINKID_REQROUTINGID) + 1;
5136 
5137     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
5138     {
5139         return -NVL_BAD_ARGS;
5140     }
5141 
5142     regval = NVSWITCH_NPORT_RD32_LR10(device, params->portNum, _NPORT, _REQLINKID);
5143     reqRid = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGID, regval);
5144     reqRlan = regval;
5145 
5146     regval = NVSWITCH_NPORT_RD32_LR10(device, params->portNum, _NPORT, _CTRL);
5147     if (FLD_TEST_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _1024, regval))
5148     {
5149         reqRlan = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGLAN_1024, reqRlan);
5150         params->requesterLinkID = (reqRid | (reqRlan << rlan_shift));
5151     }
5152     else if (FLD_TEST_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _2048, regval))
5153     {
5154         reqRlan = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGLAN_2048, reqRlan);
5155         params->requesterLinkID = (reqRid | (reqRlan << rlan_shift));
5156     }
5157     else
5158     {
5159         params->requesterLinkID = reqRid;
5160     }
5161 
5162     return NVL_SUCCESS;
5163 }
5164 
5165 /*
5166  * REGISTER_READ/_WRITE
5167  * Provides direct access to the MMIO space for trusted clients like MODS.
 * This API should not be exposed to insecure clients.
5169  */
5170 
5171 /*
5172  * _nvswitch_get_engine_base
5173  * Used by REGISTER_READ/WRITE API.  Looks up an engine based on device/instance
5174  * and returns the base address in BAR0.
5175  *
5176  * register_rw_engine   [in] REGISTER_RW_ENGINE_*
5177  * instance             [in] physical instance of device
5178  * bcast                [in] FALSE: find unicast base address
5179  *                           TRUE:  find broadcast base address
5180  * base_addr            [out] base address in BAR0 of requested device
5181  *
5182  * Returns              NVL_SUCCESS: Device base address successfully found
5183  *                      else device lookup failed
5184  */
5185 
5186 static NvlStatus
5187 _nvswitch_get_engine_base_lr10
5188 (
5189     nvswitch_device *device,
5190     NvU32   register_rw_engine,     // REGISTER_RW_ENGINE_*
5191     NvU32   instance,               // device instance
5192     NvBool  bcast,
5193     NvU32   *base_addr
5194 )
5195 {
5196     NvU32 base = 0;
5197     ENGINE_DESCRIPTOR_TYPE_LR10  *engine = NULL;
5198     NvlStatus retval = NVL_SUCCESS;
5199     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
5200 
5201     // Find the engine descriptor matching the request
5202     engine = NULL;
5203 
5204     switch (register_rw_engine)
5205     {
5206         case REGISTER_RW_ENGINE_RAW:
5207             // Special case raw IO
5208             if ((instance != 0) ||
5209                 (bcast != NV_FALSE))
5210             {
5211                 retval = -NVL_BAD_ARGS;
5212             }
5213         break;
5214 
5215         case REGISTER_RW_ENGINE_CLKS:
5216         case REGISTER_RW_ENGINE_FUSE:
5217         case REGISTER_RW_ENGINE_JTAG:
5218         case REGISTER_RW_ENGINE_PMGR:
5219         case REGISTER_RW_ENGINE_XP3G:
5220             //
5221             // Legacy devices are always single-instance, unicast-only.
5222             // These manuals are BAR0 offset-based, not IP-based.  Treat them
5223             // the same as RAW.
5224             //
5225             if ((instance != 0) ||
5226                 (bcast != NV_FALSE))
5227             {
5228                 retval = -NVL_BAD_ARGS;
5229             }
5230             register_rw_engine = REGISTER_RW_ENGINE_RAW;
5231         break;
5232 
5233         case REGISTER_RW_ENGINE_SAW:
5234             if (bcast)
5235             {
5236                 retval = -NVL_BAD_ARGS;
5237             }
5238             else
5239             {
5240                 if (NVSWITCH_ENG_VALID_LR10(device, SAW, instance))
5241                 {
5242                     engine = &chip_device->engSAW[instance];
5243                 }
5244             }
5245         break;
5246 
5247         case REGISTER_RW_ENGINE_XVE:
5248             if (bcast)
5249             {
5250                 retval = -NVL_BAD_ARGS;
5251             }
5252             else
5253             {
5254                 if (NVSWITCH_ENG_VALID_LR10(device, XVE, instance))
5255                 {
5256                     engine = &chip_device->engXVE[instance];
5257                 }
5258             }
5259         break;
5260 
5261         case REGISTER_RW_ENGINE_SOE:
5262             if (bcast)
5263             {
5264                 retval = -NVL_BAD_ARGS;
5265             }
5266             else
5267             {
5268                 if (NVSWITCH_ENG_VALID_LR10(device, SOE, instance))
5269                 {
5270                     engine = &chip_device->engSOE[instance];
5271                 }
5272             }
5273         break;
5274 
5275         case REGISTER_RW_ENGINE_SE:
5276             if (bcast)
5277             {
5278                 retval = -NVL_BAD_ARGS;
5279             }
5280             else
5281             {
5282                 if (NVSWITCH_ENG_VALID_LR10(device, SE, instance))
5283                 {
5284                     engine = &chip_device->engSE[instance];
5285                 }
5286             }
5287         break;
5288 
5289         case REGISTER_RW_ENGINE_NVLW:
5290             if (bcast)
5291             {
5292                 if (NVSWITCH_ENG_VALID_LR10(device, NVLW_BCAST, instance))
5293                 {
5294                     engine = &chip_device->engNVLW_BCAST[instance];
5295                 }
5296             }
5297             else
5298             {
5299                 if (NVSWITCH_ENG_VALID_LR10(device, NVLW, instance))
5300                 {
5301                     engine = &chip_device->engNVLW[instance];
5302                 }
5303             }
5304         break;
5305 
5306         case REGISTER_RW_ENGINE_MINION:
5307             if (bcast)
5308             {
5309                 if (NVSWITCH_ENG_VALID_LR10(device, MINION_BCAST, instance))
5310                 {
5311                     engine = &chip_device->engMINION_BCAST[instance];
5312                 }
5313             }
5314             else
5315             {
5316                 if (NVSWITCH_ENG_VALID_LR10(device, MINION, instance))
5317                 {
5318                     engine = &chip_device->engMINION[instance];
5319                 }
5320             }
5321         break;
5322 
5323         case REGISTER_RW_ENGINE_NVLIPT:
5324             if (bcast)
5325             {
5326                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_BCAST, instance))
5327                 {
5328                     engine = &chip_device->engNVLIPT_BCAST[instance];
5329                 }
5330             }
5331             else
5332             {
5333                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT, instance))
5334                 {
5335                     engine = &chip_device->engNVLIPT[instance];
5336                 }
5337             }
5338         break;
5339 
5340         case REGISTER_RW_ENGINE_NVLTLC:
5341             if (bcast)
5342             {
5343                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC_BCAST, instance))
5344                 {
5345                     engine = &chip_device->engNVLTLC_BCAST[instance];
5346                 }
5347             }
5348             else
5349             {
5350                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC, instance))
5351                 {
5352                     engine = &chip_device->engNVLTLC[instance];
5353                 }
5354             }
5355         break;
5356 
5357         case REGISTER_RW_ENGINE_NVLTLC_MULTICAST:
5358             if (bcast)
5359             {
5360                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC_MULTICAST_BCAST, instance))
5361                 {
5362                     engine = &chip_device->engNVLTLC_MULTICAST_BCAST[instance];
5363                 }
5364             }
5365             else
5366             {
5367                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC_MULTICAST, instance))
5368                 {
5369                     engine = &chip_device->engNVLTLC_MULTICAST[instance];
5370                 }
5371             }
5372         break;
5373 
5374         case REGISTER_RW_ENGINE_NPG:
5375             if (bcast)
5376             {
5377                 if (NVSWITCH_ENG_VALID_LR10(device, NPG_BCAST, instance))
5378                 {
5379                     engine = &chip_device->engNPG_BCAST[instance];
5380                 }
5381             }
5382             else
5383             {
5384                 if (NVSWITCH_ENG_VALID_LR10(device, NPG, instance))
5385                 {
5386                     engine = &chip_device->engNPG[instance];
5387                 }
5388             }
5389         break;
5390 
5391         case REGISTER_RW_ENGINE_NPORT:
5392             if (bcast)
5393             {
5394                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT_BCAST, instance))
5395                 {
5396                     engine = &chip_device->engNPORT_BCAST[instance];
5397                 }
5398             }
5399             else
5400             {
5401                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT, instance))
5402                 {
5403                     engine = &chip_device->engNPORT[instance];
5404                 }
5405             }
5406         break;
5407 
5408         case REGISTER_RW_ENGINE_NPORT_MULTICAST:
5409             if (bcast)
5410             {
5411                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT_MULTICAST_BCAST, instance))
5412                 {
5413                     engine = &chip_device->engNPORT_MULTICAST_BCAST[instance];
5414                 }
5415             }
5416             else
5417             {
5418                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT_MULTICAST, instance))
5419                 {
5420                     engine = &chip_device->engNPORT_MULTICAST[instance];
5421                 }
5422             }
5423         break;
5424 
5425         case REGISTER_RW_ENGINE_NVLIPT_LNK:
5426             if (bcast)
5427             {
5428                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK_BCAST, instance))
5429                 {
5430                     engine = &chip_device->engNVLIPT_LNK_BCAST[instance];
5431                 }
5432             }
5433             else
5434             {
5435                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK, instance))
5436                 {
5437                     engine = &chip_device->engNVLIPT_LNK[instance];
5438                 }
5439             }
5440         break;
5441 
5442         case REGISTER_RW_ENGINE_NVLIPT_LNK_MULTICAST:
5443             if (bcast)
5444             {
5445                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK_MULTICAST_BCAST, instance))
5446                 {
5447                     engine = &chip_device->engNVLIPT_LNK_MULTICAST_BCAST[instance];
5448                 }
5449             }
5450             else
5451             {
5452                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK_MULTICAST, instance))
5453                 {
5454                     engine = &chip_device->engNVLIPT_LNK_MULTICAST[instance];
5455                 }
5456             }
5457         break;
5458 
5459         case REGISTER_RW_ENGINE_PLL:
5460             if (bcast)
5461             {
5462                 if (NVSWITCH_ENG_VALID_LR10(device, PLL_BCAST, instance))
5463                 {
5464                     engine = &chip_device->engPLL_BCAST[instance];
5465                 }
5466             }
5467             else
5468             {
5469                 if (NVSWITCH_ENG_VALID_LR10(device, PLL, instance))
5470                 {
5471                     engine = &chip_device->engPLL[instance];
5472                 }
5473             }
5474         break;
5475 
5476         case REGISTER_RW_ENGINE_NVLDL:
5477             if (bcast)
5478             {
5479                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL_BCAST, instance))
5480                 {
5481                     engine = &chip_device->engNVLDL_BCAST[instance];
5482                 }
5483             }
5484             else
5485             {
5486                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL, instance))
5487                 {
5488                     engine = &chip_device->engNVLDL[instance];
5489                 }
5490             }
5491         break;
5492 
5493         case REGISTER_RW_ENGINE_NVLDL_MULTICAST:
5494             if (bcast)
5495             {
5496                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL_MULTICAST_BCAST, instance))
5497                 {
5498                     engine = &chip_device->engNVLDL_MULTICAST_BCAST[instance];
5499                 }
5500             }
5501             else
5502             {
5503                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL_MULTICAST, instance))
5504                 {
5505                     engine = &chip_device->engNVLDL_MULTICAST[instance];
5506                 }
5507             }
5508         break;
5509 
5510         case REGISTER_RW_ENGINE_NXBAR:
5511             if (bcast)
5512             {
5513                 if (NVSWITCH_ENG_VALID_LR10(device, NXBAR_BCAST, instance))
5514                 {
5515                     engine = &chip_device->engNXBAR_BCAST[instance];
5516                 }
5517             }
5518             else
5519             {
5520                 if (NVSWITCH_ENG_VALID_LR10(device, NXBAR, instance))
5521                 {
5522                     engine = &chip_device->engNXBAR[instance];
5523                 }
5524             }
5525         break;
5526 
5527         case REGISTER_RW_ENGINE_TILE:
5528             if (bcast)
5529             {
5530                 if (NVSWITCH_ENG_VALID_LR10(device, TILE_BCAST, instance))
5531                 {
5532                     engine = &chip_device->engTILE_BCAST[instance];
5533                 }
5534             }
5535             else
5536             {
5537                 if (NVSWITCH_ENG_VALID_LR10(device, TILE, instance))
5538                 {
5539                     engine = &chip_device->engTILE[instance];
5540                 }
5541             }
5542         break;
5543 
5544         case REGISTER_RW_ENGINE_TILE_MULTICAST:
5545             if (bcast)
5546             {
5547                 if (NVSWITCH_ENG_VALID_LR10(device, TILE_MULTICAST_BCAST, instance))
5548                 {
5549                     engine = &chip_device->engTILE_MULTICAST_BCAST[instance];
5550                 }
5551             }
5552             else
5553             {
5554                 if (NVSWITCH_ENG_VALID_LR10(device, TILE_MULTICAST, instance))
5555                 {
5556                     engine = &chip_device->engTILE_MULTICAST[instance];
5557                 }
5558             }
5559         break;
5560 
5561         default:
5562             NVSWITCH_PRINT(device, ERROR,
5563                 "%s: unknown REGISTER_RW_ENGINE 0x%x\n",
5564                 __FUNCTION__,
5565                 register_rw_engine);
5566             engine = NULL;
5567         break;
5568     }
5569 
5570     if (register_rw_engine == REGISTER_RW_ENGINE_RAW)
5571     {
5572         // Raw IO -- client provides full BAR0 offset
5573         base = 0;
5574     }
5575     else
5576     {
5577         // Check engine descriptor was found and valid
5578         if (engine == NULL)
5579         {
5580             retval = -NVL_BAD_ARGS;
5581             NVSWITCH_PRINT(device, ERROR,
5582                 "%s: invalid REGISTER_RW_ENGINE/instance 0x%x(%d)\n",
5583                 __FUNCTION__,
5584                 register_rw_engine,
5585                 instance);
5586         }
5587         else if (!engine->valid)
5588         {
5589             retval = -NVL_UNBOUND_DEVICE;
5590             NVSWITCH_PRINT(device, ERROR,
5591                 "%s: REGISTER_RW_ENGINE/instance 0x%x(%d) disabled or invalid\n",
5592                 __FUNCTION__,
5593                 register_rw_engine,
5594                 instance);
5595         }
5596         else
5597         {
5598             if (bcast && (engine->disc_type == DISCOVERY_TYPE_BROADCAST))
5599             {
5600                 //
5601                 // Caveat emptor: A read of a broadcast register is
5602                 // implementation-specific.
5603                 //
5604                 base = engine->info.bc.bc_addr;
5605             }
5606             else if ((!bcast) && (engine->disc_type == DISCOVERY_TYPE_UNICAST))
5607             {
5608                 base = engine->info.uc.uc_addr;
5609             }
5610 
5611             if (base == 0)
5612             {
5613                 NVSWITCH_PRINT(device, ERROR,
5614                     "%s: REGISTER_RW_ENGINE/instance 0x%x(%d) has %s base address 0!\n",
5615                     __FUNCTION__,
5616                     register_rw_engine,
5617                     instance,
5618                     (bcast ? "BCAST" : "UNICAST" ));
5619                 retval = -NVL_IO_ERROR;
5620             }
5621         }
5622     }
5623 
5624     *base_addr = base;
5625     return retval;
5626 }
5627 
5628 /*
5629  * CTRL_NVSWITCH_REGISTER_READ
5630  *
5631  * This provides direct access to the MMIO space for trusted clients like
5632  * MODS.
5633  * This API should not be exposed to unsecure clients.
5634  */
5635 
5636 static NvlStatus
5637 nvswitch_ctrl_register_read_lr10
5638 (
5639     nvswitch_device *device,
5640     NVSWITCH_REGISTER_READ *p
5641 )
5642 {
5643     NvU32 base;
5644     NvU32 data;
5645     NvlStatus retval = NVL_SUCCESS;
5646 
5647     retval = _nvswitch_get_engine_base_lr10(device, p->engine, p->instance, NV_FALSE, &base);
5648     if (retval != NVL_SUCCESS)
5649     {
5650         return retval;
5651     }
5652 
5653     // Make sure target offset isn't out-of-range
5654     if ((base + p->offset) >= device->nvlink_device->pciInfo.bars[0].barSize)
5655     {
5656         return -NVL_IO_ERROR;
5657     }
5658 
5659     //
5660     // Some legacy device manuals are not 0-based (IP style).
5661     //
5662     data = NVSWITCH_OFF_RD32(device, base + p->offset);
5663     p->val = data;
5664 
5665     return NVL_SUCCESS;
5666 }
5667 
5668 /*
5669  * CTRL_NVSWITCH_REGISTER_WRITE
5670  *
5671  * This provides direct access to the MMIO space for trusted clients like
5672  * MODS.
5673  * This API should not be exposed to unsecure clients.
5674  */
5675 
5676 static NvlStatus
5677 nvswitch_ctrl_register_write_lr10
5678 (
5679     nvswitch_device *device,
5680     NVSWITCH_REGISTER_WRITE *p
5681 )
5682 {
5683     NvU32 base;
5684     NvlStatus retval = NVL_SUCCESS;
5685 
5686     retval = _nvswitch_get_engine_base_lr10(device, p->engine, p->instance, p->bcast, &base);
5687     if (retval != NVL_SUCCESS)
5688     {
5689         return retval;
5690     }
5691 
5692     // Make sure target offset isn't out-of-range
5693     if ((base + p->offset) >= device->nvlink_device->pciInfo.bars[0].barSize)
5694     {
5695         return -NVL_IO_ERROR;
5696     }
5697 
5698     //
5699     // Some legacy device manuals are not 0-based (IP style).
5700     //
5701     NVSWITCH_OFF_WR32(device, base + p->offset, p->val);
5702 
5703     return NVL_SUCCESS;
5704 }
5705 
5706 NvlStatus
5707 nvswitch_ctrl_get_bios_info_lr10
5708 (
5709     nvswitch_device *device,
5710     NVSWITCH_GET_BIOS_INFO_PARAMS *p
5711 )
5712 {
5713     NvU32 biosVersionBytes;
5714     NvU32 biosOemVersionBytes;
5715     NvU32 biosMagic = 0x9210;
5716 
5717     //
5718     // Example: 92.10.09.00.00 is the formatted version string
5719     //          |         |  |
5720     //          |         |  |__ BIOS OEM version byte
5721     //          |         |
5722     //          |_________|_____ BIOS version bytes
5723     //
5724     biosVersionBytes = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_6);
5725     biosOemVersionBytes = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_7);
5726 
5727     //
5728     // LR10 is built out of core92 and the BIOS version will always begin with
5729     // 92.10.xx.xx.xx
5730     //
5731     if ((biosVersionBytes >> 16) != biosMagic)
5732     {
5733         NVSWITCH_PRINT(device, ERROR,
5734                 "BIOS version not found in scratch register\n");
5735         return -NVL_ERR_INVALID_STATE;
5736     }
5737 
5738     p->version = (((NvU64)biosVersionBytes) << 8) | (biosOemVersionBytes & 0xff);
5739 
5740     return NVL_SUCCESS;
5741 }
5742 
5743 NvlStatus
5744 nvswitch_ctrl_get_inforom_version_lr10
5745 (
5746     nvswitch_device *device,
5747     NVSWITCH_GET_INFOROM_VERSION_PARAMS *p
5748 )
5749 {
5750 
5751     struct inforom *pInforom = device->pInforom;
5752 
5753     if ((pInforom == NULL) || (!pInforom->IMG.bValid))
5754     {
5755         return -NVL_ERR_NOT_SUPPORTED;
5756     }
5757 
5758     if (NV_ARRAY_ELEMENTS(pInforom->IMG.object.version) <
5759         NVSWITCH_INFOROM_VERSION_LEN)
5760     {
5761         NVSWITCH_PRINT(device, ERROR,
5762                        "Inforom IMG object struct smaller than expected\n");
5763         return -NVL_ERR_INVALID_STATE;
5764     }
5765 
5766     nvswitch_inforom_string_copy(pInforom->IMG.object.version, p->version,
5767                                  NVSWITCH_INFOROM_VERSION_LEN);
5768 
5769     return NVL_SUCCESS;
5770 }
5771 
5772 void
5773 nvswitch_corelib_clear_link_state_lr10
5774 (
5775     nvlink_link *link
5776 )
5777 {
5778     // Receiver Detect needs to happen again
5779     link->bRxDetected = NV_FALSE;
5780 
5781     // INITNEGOTIATE needs to happen again
5782     link->bInitnegotiateConfigGood = NV_FALSE;
5783 
5784     // TxCommonMode needs to happen again
5785     link->bTxCommonModeFail = NV_FALSE;
5786 
5787     // SAFE transition needs to happen again
5788     link->bSafeTransitionFail = NV_FALSE;
5789 
5790     // Reset the SW state tracking the link and sublink states
5791     link->state            = NVLINK_LINKSTATE_OFF;
5792     link->tx_sublink_state = NVLINK_SUBLINK_STATE_TX_OFF;
5793     link->rx_sublink_state = NVLINK_SUBLINK_STATE_RX_OFF;
5794 }
5795 
5796 const static NvU32 nport_reg_addr[] =
5797 {
5798     NV_NPORT_CTRL,
5799     NV_NPORT_CTRL_SLCG,
5800     NV_NPORT_REQLINKID,
5801     NV_NPORT_PORTSTAT_CONTROL,
5802     NV_NPORT_PORTSTAT_SNAP_CONTROL,
5803     NV_NPORT_PORTSTAT_WINDOW_LIMIT,
5804     NV_NPORT_PORTSTAT_LIMIT_LOW_0,
5805     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_0,
5806     NV_NPORT_PORTSTAT_LIMIT_HIGH_0,
5807     NV_NPORT_PORTSTAT_LIMIT_LOW_1,
5808     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1,
5809     NV_NPORT_PORTSTAT_LIMIT_HIGH_1,
5810     NV_NPORT_PORTSTAT_LIMIT_LOW_2,
5811     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_2,
5812     NV_NPORT_PORTSTAT_LIMIT_HIGH_2,
5813     NV_NPORT_PORTSTAT_LIMIT_LOW_3,
5814     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_3,
5815     NV_NPORT_PORTSTAT_LIMIT_HIGH_3,
5816     NV_NPORT_PORTSTAT_LIMIT_LOW_4,
5817     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_4,
5818     NV_NPORT_PORTSTAT_LIMIT_HIGH_4,
5819     NV_NPORT_PORTSTAT_LIMIT_LOW_5,
5820     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_5,
5821     NV_NPORT_PORTSTAT_LIMIT_HIGH_5,
5822     NV_NPORT_PORTSTAT_LIMIT_LOW_6,
5823     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_6,
5824     NV_NPORT_PORTSTAT_LIMIT_HIGH_6,
5825     NV_NPORT_PORTSTAT_LIMIT_LOW_7,
5826     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_7,
5827     NV_NPORT_PORTSTAT_LIMIT_HIGH_7,
5828     NV_NPORT_PORTSTAT_SOURCE_FILTER_0,
5829     NV_NPORT_PORTSTAT_SOURCE_FILTER_1,
5830     NV_ROUTE_ROUTE_CONTROL,
5831     NV_ROUTE_CMD_ROUTE_TABLE0,
5832     NV_ROUTE_CMD_ROUTE_TABLE1,
5833     NV_ROUTE_CMD_ROUTE_TABLE2,
5834     NV_ROUTE_CMD_ROUTE_TABLE3,
5835     NV_ROUTE_ERR_LOG_EN_0,
5836     NV_ROUTE_ERR_CONTAIN_EN_0,
5837     NV_ROUTE_ERR_ECC_CTRL,
5838     NV_ROUTE_ERR_GLT_ECC_ERROR_COUNTER_LIMIT,
5839     NV_ROUTE_ERR_NVS_ECC_ERROR_COUNTER_LIMIT,
5840     NV_INGRESS_ERR_LOG_EN_0,
5841     NV_INGRESS_ERR_CONTAIN_EN_0,
5842     NV_INGRESS_ERR_ECC_CTRL,
5843     NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_COUNTER_LIMIT,
5844     NV_INGRESS_ERR_RIDTAB_ECC_ERROR_COUNTER_LIMIT,
5845     NV_INGRESS_ERR_RLANTAB_ECC_ERROR_COUNTER_LIMIT,
5846     NV_INGRESS_ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT,
5847     NV_EGRESS_CTRL,
5848     NV_EGRESS_CTO_TIMER_LIMIT,
5849     NV_EGRESS_ERR_LOG_EN_0,
5850     NV_EGRESS_ERR_CONTAIN_EN_0,
5851     NV_EGRESS_ERR_ECC_CTRL,
5852     NV_EGRESS_ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT,
5853     NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT,
5854     NV_TSTATE_TAGSTATECONTROL,
5855     NV_TSTATE_ATO_TIMER_LIMIT,
5856     NV_TSTATE_CREQ_CAM_LOCK,
5857     NV_TSTATE_ERR_LOG_EN_0,
5858     NV_TSTATE_ERR_CONTAIN_EN_0,
5859     NV_TSTATE_ERR_ECC_CTRL,
5860     NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5861     NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT,
5862     NV_TSTATE_ERR_TD_TID_RAM_ECC_ERROR_COUNTER_LIMIT,
5863     NV_SOURCETRACK_CTRL,
5864     NV_SOURCETRACK_MULTISEC_TIMER0,
5865     NV_SOURCETRACK_ERR_LOG_EN_0,
5866     NV_SOURCETRACK_ERR_CONTAIN_EN_0,
5867     NV_SOURCETRACK_ERR_ECC_CTRL,
5868     NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5869     NV_SOURCETRACK_ERR_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5870     NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5871 };
5872 
5873 /*
5874  *  Disable interrupts comming from NPG & NVLW blocks.
5875  */
static void
_nvswitch_link_disable_interrupts_lr10
(
    nvswitch_device *device,
    NvU32 link
)
{
    NvU32 i;

    // Mask off correctable/fatal/nonfatal error reporting on the link's
    // NPORT so no interrupts fire while the port is being reset.
    NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT,
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x0) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x0) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x0));

    // Mask all three NVLW per-link interrupt trees (INTR_0/1/2) for every
    // interrupt-status index.
    for (i = 0; i < NV_NVLCTRL_LINK_INTR_0_STATUS__SIZE_1; i++)
    {
        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_0_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _FATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _NONFATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _CORRECTABLE, 0x0));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_1_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _FATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _NONFATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _CORRECTABLE, 0x0));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_2_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _FATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _NONFATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _CORRECTABLE, 0x0));
    }
}
5908 
5909 /*
5910  *  Reset NPG & NVLW interrupt state.
5911  */
static void
_nvswitch_link_reset_interrupts_lr10
(
    nvswitch_device *device,
    NvU32 link
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32 i;

    // Re-enable correctable/fatal/nonfatal error reporting on the link's
    // NPORT (inverse of _nvswitch_link_disable_interrupts_lr10).
    NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT,
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x1) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x1) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x1));

    // Unmask all three NVLW per-link interrupt trees (INTR_0/1/2) for every
    // interrupt-status index.
    for (i = 0; i < NV_NVLCTRL_LINK_INTR_0_STATUS__SIZE_1; i++)
    {
        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_0_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _FATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _NONFATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _CORRECTABLE, 0x1));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_1_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _FATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _NONFATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _CORRECTABLE, 0x1));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_2_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _FATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _NONFATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _CORRECTABLE, 0x1));
    }

    // Enable interrupts which are disabled to prevent interrupt storm.
    // The per-unit enable masks were cached in chip_device->intr_mask at init.
    NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.route.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.route.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.ingress.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.ingress.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.egress.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.egress.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.tstate.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.tstate.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.sourcetrack.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.sourcetrack.nonfatal);

    // Clear fatal error status
    device->link[link].fatal_error_occurred = NV_FALSE;
}
5960 
5961 /*
5962  * @Brief : Control to reset and drain the links.
5963  *
5964  * @param[in] device        A reference to the device to initialize
5965  * @param[in] linkMask      A mask of link(s) to be reset.
5966  *
5967  * @returns :               NVL_SUCCESS if there were no errors
5968  *                         -NVL_BAD_PARAMS if input parameters are wrong.
5969  *                         -NVL_ERR_INVALID_STATE if other errors are present and a full-chip reset is required.
5970  *                         -NVL_INITIALIZATION_TOTAL_FAILURE if NPORT initialization failed and a retry is required.
5971  */
5972 
5973 NvlStatus
5974 nvswitch_reset_and_drain_links_lr10
5975 (
5976     nvswitch_device *device,
5977     NvU64 link_mask
5978 )
5979 {
5980     NvlStatus status = -NVL_ERR_GENERIC;
5981     nvlink_link *link_info;
5982     NvU32 val;
5983     NvU32 link;
5984     NvU32 idx_nport;
5985     NvU32 npg;
5986     NVSWITCH_TIMEOUT timeout;
5987     NvBool           keepPolling;
5988     NvU32 i;
5989     NvU64 link_mode, tx_sublink_mode, rx_sublink_mode;
5990     NvU32 tx_sublink_submode, rx_sublink_submode;
5991     NvU32 *nport_reg_val = NULL;
5992     NvU32 reg_count = NV_ARRAY_ELEMENTS(nport_reg_addr);
5993     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
5994 
5995     if ((link_mask == 0) ||
5996         (link_mask >> NVSWITCH_LINK_COUNT(device)))
5997     {
5998         NVSWITCH_PRINT(device, ERROR,
5999             "%s: Invalid link_mask = 0x%llx\n",
6000             __FUNCTION__, link_mask);
6001 
6002         return -NVL_BAD_ARGS;
6003     }
6004 
6005     // Check for in-active links
6006     FOR_EACH_INDEX_IN_MASK(64, link, link_mask)
6007     {
6008         if (!nvswitch_is_link_valid(device, link))
6009         {
6010             NVSWITCH_PRINT(device, ERROR,
6011                 "%s: link #%d invalid\n",
6012                 __FUNCTION__, link);
6013 
6014             continue;
6015         }
6016         if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, link))
6017         {
6018             NVSWITCH_PRINT(device, ERROR,
6019                 "%s: NPORT #%d invalid\n",
6020                 __FUNCTION__, link);
6021 
6022             continue;
6023         }
6024 
6025         if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLW, link))
6026         {
6027             NVSWITCH_PRINT(device, ERROR,
6028                 "%s: NVLW #%d invalid\n",
6029                 __FUNCTION__, link);
6030 
6031             continue;
6032         }
6033     }
6034     FOR_EACH_INDEX_IN_MASK_END;
6035 
6036     // Buffer to backup NPORT state
6037     nport_reg_val = nvswitch_os_malloc(sizeof(nport_reg_addr));
6038     if (nport_reg_val == NULL)
6039     {
6040         NVSWITCH_PRINT(device, ERROR,
6041             "%s: Failed to allocate memory\n",
6042             __FUNCTION__);
6043 
6044         return -NVL_NO_MEM;
6045     }
6046 
6047     FOR_EACH_INDEX_IN_MASK(64, link, link_mask)
6048     {
6049         // Unregister links to make them unusable while reset is in progress.
6050         link_info = nvswitch_get_link(device, link);
6051         if (link_info == NULL)
6052         {
6053             NVSWITCH_PRINT(device, ERROR,
6054                 "%s: invalid link %d\n",
6055                 __FUNCTION__, link);
6056             continue;
6057         }
6058 
6059         nvlink_lib_unregister_link(link_info);
6060 
6061         //
6062         // Step 0 :
6063         // Prior to starting port reset, FM must shutdown the NVlink links
6064         // it wishes to reset.
6065         // However, with shared-virtualization, FM is unable to shut down the links
6066         // since the GPU is no longer attached to the service VM.
6067         // In this case, we must perform unilateral shutdown on the LR10 side
6068         // of the link.
6069         //
6070         // If links are in OFF or RESET, we don't need to perform shutdown
6071         // If links already went through a proper pseudo-clean shutdown sequence,
6072         // they'll be in SAFE + sublinks in OFF
6073         //
6074 
6075         status = nvswitch_corelib_get_dl_link_mode_lr10(link_info, &link_mode);
6076         if (status != NVL_SUCCESS)
6077         {
6078             NVSWITCH_PRINT(device, ERROR,
6079                 "%s: Unable to get link mode from link %d\n",
6080                 __FUNCTION__, link);
6081             goto nvswitch_reset_and_drain_links_exit;
6082         }
6083         status = nvswitch_corelib_get_tx_mode_lr10(link_info, &tx_sublink_mode, &tx_sublink_submode);
6084         if (status != NVL_SUCCESS)
6085         {
6086             NVSWITCH_PRINT(device, ERROR,
6087                 "%s: Unable to get tx sublink mode from link %d\n",
6088                 __FUNCTION__, link);
6089             goto nvswitch_reset_and_drain_links_exit;
6090         }
6091         status = nvswitch_corelib_get_rx_mode_lr10(link_info, &rx_sublink_mode, &rx_sublink_submode);
6092         if (status != NVL_SUCCESS)
6093         {
6094             NVSWITCH_PRINT(device, ERROR,
6095                 "%s: Unable to get rx sublink mode from link %d\n",
6096                 __FUNCTION__, link);
6097             goto nvswitch_reset_and_drain_links_exit;
6098         }
6099 
6100         if (!((link_mode == NVLINK_LINKSTATE_RESET) ||
6101               (link_mode == NVLINK_LINKSTATE_OFF) ||
6102               ((link_mode == NVLINK_LINKSTATE_SAFE) &&
6103                (tx_sublink_mode == NVLINK_SUBLINK_STATE_TX_OFF) &&
6104                (rx_sublink_mode == NVLINK_SUBLINK_STATE_RX_OFF))))
6105         {
6106             nvswitch_execute_unilateral_link_shutdown_lr10(link_info);
6107             nvswitch_corelib_clear_link_state_lr10(link_info);
6108         }
6109 
6110         //
6111         // Step 1 : Perform surgical reset
6112         // Refer to switch IAS 11.5.2 Link Reset.
6113         //
6114 
6115         // Step 1.a : Backup NPORT state before reset
6116         for (i = 0; i < reg_count; i++)
6117         {
6118             nport_reg_val[i] = NVSWITCH_ENG_OFF_RD32(device, NPORT, _UNICAST, link,
6119                 nport_reg_addr[i]);
6120         }
6121 
6122         // Step 1.b : Assert INGRESS_STOP / EGRESS_STOP
6123         val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _CTRL_STOP);
6124         val = FLD_SET_DRF(_NPORT, _CTRL_STOP, _INGRESS_STOP, _STOP, val);
6125         val = FLD_SET_DRF(_NPORT, _CTRL_STOP, _EGRESS_STOP, _STOP, val);
6126         NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _CTRL_STOP, val);
6127 
6128         // Wait for stop operation to take effect at TLC.
6129         // Expected a minimum of 256 clk cycles.
6130         nvswitch_os_sleep(1);
6131 
6132         //
6133         // Step 1.c : Disable NPG & NVLW interrupts
6134         //
6135         _nvswitch_link_disable_interrupts_lr10(device, link);
6136 
6137         // Step 1.d : Assert NPortWarmReset
6138         npg = link / NVSWITCH_LINKS_PER_NPG;
6139         val = NVSWITCH_NPG_RD32_LR10(device, npg, _NPG, _WARMRESET);
6140 
6141         idx_nport = link % NVSWITCH_LINKS_PER_NPG;
6142         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _WARMRESET,
6143             DRF_NUM(_NPG, _WARMRESET, _NPORTWARMRESET, ~NVBIT(idx_nport)));
6144 
6145         // Step 1.e : Initiate Minion reset sequence.
6146         status = nvswitch_request_tl_link_state_lr10(link_info,
6147             NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET, NV_TRUE);
6148         if (status != NVL_SUCCESS)
6149         {
6150             NVSWITCH_PRINT(device, ERROR,
6151                 "%s: NvLink Reset has failed for link %d\n",
6152                 __FUNCTION__, link);
6153             goto nvswitch_reset_and_drain_links_exit;
6154         }
6155 
6156         // Step 1.e : De-assert NPortWarmReset
6157         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _WARMRESET, val);
6158 
6159         // Step 1.f : Assert and De-assert NPort debug_clear
6160         // to clear the error status
6161         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _DEBUG_CLEAR,
6162             DRF_NUM(_NPG, _DEBUG_CLEAR, _CLEAR, NVBIT(idx_nport)));
6163 
6164         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _DEBUG_CLEAR,
6165             DRF_DEF(_NPG, _DEBUG_CLEAR, _CLEAR, _DEASSERT));
6166 
6167         // Step 1.g : Clear CONTAIN_AND_DRAIN to clear contain state (Bug 3115824)
6168         NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _CONTAIN_AND_DRAIN,
6169             DRF_DEF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE));
6170 
6171         val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _CONTAIN_AND_DRAIN);
6172         if (FLD_TEST_DRF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE, val))
6173         {
6174             NVSWITCH_PRINT(device, ERROR,
6175                 "%s: NPORT Contain and Drain Clear has failed for link %d\n",
6176                 __FUNCTION__, link);
6177             status = NVL_ERR_INVALID_STATE;
6178             goto nvswitch_reset_and_drain_links_exit;
6179         }
6180 
6181         //
6182         // Step 2 : Assert NPORT Reset after Control & Drain routine.
6183         //  Clear Tagpool, CrumbStore and CAM RAMs
6184         //
6185 
6186         // Step 2.a Clear Tagpool RAM
6187         NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _INITIALIZATION,
6188             DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT));
6189 
6190         nvswitch_timeout_create(25 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
6191 
6192         do
6193         {
6194             keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
6195 
6196             // Check if NPORT initialization is done
6197             val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _INITIALIZATION);
6198             if (FLD_TEST_DRF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT, val))
6199             {
6200                 break;
6201             }
6202 
6203             nvswitch_os_sleep(1);
6204         }
6205         while (keepPolling);
6206 
6207         if (!FLD_TEST_DRF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT, val))
6208         {
6209             NVSWITCH_PRINT(device, ERROR,
6210                 "%s: Timeout waiting for TAGPOOL Initialization on link %d)\n",
6211                 __FUNCTION__, link);
6212 
6213             status = -NVL_INITIALIZATION_TOTAL_FAILURE;
6214             goto nvswitch_reset_and_drain_links_exit;
6215         }
6216 
6217         // Step 2.b Clear CrumbStore RAM
6218         val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0) |
6219               DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CRUMBSTORE_RAM) |
6220               DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 1);
6221 
6222         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_ADDRESS, val);
6223         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA1, 0x0);
6224 
6225         val = DRF_NUM(_TSTATE, _RAM_DATA0, _ECC, 0x7f);
6226         for (i = 0; i <= NV_TSTATE_RAM_ADDRESS_ADDR_TAGPOOL_CRUMBSTORE_TDTID_DEPTH; i++)
6227         {
6228             NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA0, val);
6229         }
6230 
6231         // Step 2.c Clear CAM RAM
6232         val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0) |
6233               DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CREQ_CAM) |
6234               DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 1);
6235 
6236         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_ADDRESS, val);
6237         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA1, 0x0);
6238         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA2, 0x0);
6239 
6240         for (i = 0; i <= NV_TSTATE_RAM_ADDRESS_ADDR_CREQ_CAM_DEPTH; i++)
6241         {
6242             NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA0, 0x0);
6243         }
6244 
6245         //
6246         // Step 3 : Restore link state
6247         //
6248 
6249         // Restore NPORT state after reset
6250         for (i = 0; i < reg_count; i++)
6251         {
6252             NVSWITCH_ENG_OFF_WR32(device, NPORT, _UNICAST, link,
6253                                   nport_reg_addr[i], nport_reg_val[i]);
6254         }
6255 
6256         // Initialize GLT
6257         nvswitch_set_ganged_link_table_lr10(device, 0, chip_device->ganged_link_table,
6258                                             ROUTE_GANG_TABLE_SIZE/2);
6259 
6260         // Initialize select scratch registers to 0x0
6261         nvswitch_init_scratch_lr10(device);
6262 
6263         // Reset NVLW and NPORT interrupt state
6264         _nvswitch_link_reset_interrupts_lr10(device, link);
6265 
6266         // Re-register links.
6267         status = nvlink_lib_register_link(device->nvlink_device, link_info);
6268         if (status != NVL_SUCCESS)
6269         {
6270             nvswitch_destroy_link(link_info);
6271             goto nvswitch_reset_and_drain_links_exit;
6272         }
6273     }
6274     FOR_EACH_INDEX_IN_MASK_END;
6275 
6276     // Launch ALI training if applicable
6277     (void)nvswitch_launch_ALI(device);
6278 
6279 nvswitch_reset_and_drain_links_exit:
6280     nvswitch_os_free(nport_reg_val);
6281     return status;
6282 }
6283 
/*
 * @brief Collect per-lane ECC corrected-error counts for every link in
 *        params->linkMask.
 *
 * For each requested link, reads the MINION DL status registers
 * NV_NVLSTAT_RX12..RX12+lane for each active lane and records the
 * corrected-error count and overflow flag. Lanes are reported in
 * physical order, so the offset is mirrored when the link's lanes are
 * reversed. Lanes beyond the sublink width, or all lanes when MINION is
 * not initialized, are reported as zero with valid == NV_FALSE.
 *
 * @param[in]  device  nvswitch device
 * @param[out] params  linkMask selects links; errorLink[] receives results
 *
 * @returns NVL_SUCCESS, -NVL_BAD_ARGS on an invalid link in the mask, or
 *          the failing status from the MINION DL status read.
 */
NvlStatus
nvswitch_get_nvlink_ecc_errors_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_NVLINK_ECC_ERRORS_PARAMS *params
)
{
    NvU32 statData;
    NvU8 i, j;
    NvlStatus status;
    NvBool bLaneReversed;

    // Zero all output so unreported lanes/links read as no errors.
    nvswitch_os_memset(params->errorLink, 0, sizeof(params->errorLink));

    FOR_EACH_INDEX_IN_MASK(64, i, params->linkMask)
    {
        nvlink_link         *link;
        NVSWITCH_LANE_ERROR *errorLane;
        NvU8                offset;
        NvBool              minion_enabled;
        NvU32               sublinkWidth;

        link = nvswitch_get_link(device, i);

        // Reject masks naming absent links or links without an NVLDL engine.
        if ((link == NULL) ||
            !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
            (i >= NVSWITCH_LINK_COUNT(device)))
        {
            return -NVL_BAD_ARGS;
        }

        sublinkWidth = device->hal.nvswitch_get_sublink_width(device, i);

        // DL status is only readable when the owning MINION is up.
        minion_enabled = nvswitch_is_minion_initialized(device,
            NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION));

        bLaneReversed = nvswitch_link_lane_reversed_lr10(device, link->linkNumber);

        for (j = 0; j < NVSWITCH_NVLINK_MAX_LANES; j++)
        {
            if (minion_enabled && (j < sublinkWidth))
            {
                // One RX12+j status word per lane.
                status = nvswitch_minion_get_dl_status(device, i,
                                        (NV_NVLSTAT_RX12 + j), 0, &statData);

                if (status != NVL_SUCCESS)
                {
                    return status;
                }
                // Mirror the lane index when the link is wired reversed so
                // results are reported in physical lane order.
                offset = bLaneReversed ? ((sublinkWidth - 1) - j) : j;
                errorLane                = &params->errorLink[i].errorLane[offset];
                errorLane->valid         = NV_TRUE;
            }
            else
            {
                // MINION disabled
                statData                 = 0;
                offset                   = j;
                errorLane                = &params->errorLink[i].errorLane[offset];
                errorLane->valid         = NV_FALSE;
            }

            errorLane->eccErrorValue = DRF_VAL(_NVLSTAT, _RX12, _ECC_CORRECTED_ERR_L0_VALUE, statData);
            errorLane->overflowed    = DRF_VAL(_NVLSTAT, _RX12, _ECC_CORRECTED_ERR_L0_OVER, statData);
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}
6354 
6355 static NvU32
6356 nvswitch_get_num_links_lr10
6357 (
6358     nvswitch_device *device
6359 )
6360 {
6361     NvU32 num_links = NVSWITCH_NUM_LINKS_LR10;
6362     return num_links;
6363 }
6364 
6365 static NvU8
6366 nvswitch_get_num_links_per_nvlipt_lr10
6367 (
6368     nvswitch_device *device
6369 )
6370 {
6371     return NVSWITCH_LINKS_PER_NVLIPT;
6372 }
6373 
6374 NvBool
6375 nvswitch_is_link_valid_lr10
6376 (
6377     nvswitch_device *device,
6378     NvU32            link_id
6379 )
6380 {
6381     if (link_id >= nvswitch_get_num_links(device))
6382     {
6383         return NV_FALSE;
6384     }
6385     return device->link[link_id].valid;
6386 }
6387 
6388 NvlStatus
6389 nvswitch_ctrl_get_fom_values_lr10
6390 (
6391     nvswitch_device *device,
6392     NVSWITCH_GET_FOM_VALUES_PARAMS *p
6393 )
6394 {
6395     NvlStatus status;
6396     NvU32     statData;
6397     nvlink_link *link;
6398 
6399     link = nvswitch_get_link(device, p->linkId);
6400     if (link == NULL)
6401     {
6402         NVSWITCH_PRINT(device, ERROR, "%s: link #%d invalid\n",
6403             __FUNCTION__, p->linkId);
6404         return -NVL_BAD_ARGS;
6405     }
6406 
6407     status = nvswitch_minion_get_dl_status(device, p->linkId,
6408                                         NV_NVLSTAT_TR16, 0, &statData);
6409     p->figureOfMeritValues[0] = (NvU16) (statData & 0xFFFF);
6410     p->figureOfMeritValues[1] = (NvU16) ((statData >> 16) & 0xFFFF);
6411 
6412     status = nvswitch_minion_get_dl_status(device, p->linkId,
6413                                         NV_NVLSTAT_TR17, 0, &statData);
6414     p->figureOfMeritValues[2] = (NvU16) (statData & 0xFFFF);
6415     p->figureOfMeritValues[3] = (NvU16) ((statData >> 16) & 0xFFFF);
6416 
6417     p->numLanes = nvswitch_get_sublink_width(device, p->linkId);
6418 
6419     return status;
6420 }
6421 
/*
 * @brief Record a fatal error on a link and flag the required reset scope
 *        in scratch registers that survive a driver reload.
 *
 * On the first fatal error for the link, clients are notified of
 * PORT_DOWN. The reset-required flag is then recorded either device-wide
 * (SAW SW_SCRATCH_12) or per-port (NPORT SCRATCH_WARM).
 *
 * @param[in] device        nvswitch device
 * @param[in] device_fatal  NV_TRUE if the whole device must be reset;
 *                          NV_FALSE if a per-port reset suffices
 * @param[in] link_id       link on which the fatal error occurred
 */
void
nvswitch_set_fatal_error_lr10
(
    nvswitch_device *device,
    NvBool           device_fatal,
    NvU32            link_id
)
{
    NvU32 reg;

    NVSWITCH_ASSERT(link_id < nvswitch_get_num_links(device));

    // On first fatal error, notify PORT_DOWN
    if (!device->link[link_id].fatal_error_occurred)
    {
        if (nvswitch_lib_notify_client_events(device,
                    NVSWITCH_DEVICE_EVENT_PORT_DOWN) != NVL_SUCCESS)
        {
            NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify PORT_DOWN event\n",
                         __FUNCTION__);
        }
    }

    device->link[link_id].fatal_error_occurred = NV_TRUE;

    if (device_fatal)
    {
        // Device-scope: read-modify-write the SAW scratch so the flag
        // persists until the device is reset.
        reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12);
        reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_RESET_REQUIRED,
                              1, reg);

        NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _SW_SCRATCH_12, reg);
    }
    else
    {
        // Port-scope: record in the per-link NPORT warm scratch instead.
        reg = NVSWITCH_LINK_RD32_LR10(device, link_id, NPORT, _NPORT, _SCRATCH_WARM);
        reg = FLD_SET_DRF_NUM(_NPORT, _SCRATCH_WARM, _PORT_RESET_REQUIRED,
                              1, reg);

        NVSWITCH_LINK_WR32_LR10(device, link_id, NPORT, _NPORT, _SCRATCH_WARM, reg);
    }
}
6464 
6465 static NvU32
6466 nvswitch_get_latency_sample_interval_msec_lr10
6467 (
6468     nvswitch_device *device
6469 )
6470 {
6471     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
6472     return chip_device->latency_stats->sample_interval_msec;
6473 }
6474 
NvU32
nvswitch_get_swap_clk_default_lr10
(
    nvswitch_device *device
)
{
    // Swap clock is not supported on LR10.
    // NOTE(review): a negative NvlStatus is returned through an unsigned
    // NvU32 return type; callers presumably reinterpret the value as
    // NvlStatus — confirm against the HAL interface before changing.
    return -NVL_ERR_NOT_SUPPORTED;
}
6483 
6484 NvBool
6485 nvswitch_is_link_in_use_lr10
6486 (
6487     nvswitch_device *device,
6488     NvU32 link_id
6489 )
6490 {
6491     NvU32 data;
6492     nvlink_link *link;
6493 
6494     link = nvswitch_get_link(device, link_id);
6495     if (link == NULL)
6496     {
6497         // A query on an invalid link should never occur
6498         NVSWITCH_ASSERT(link != NULL);
6499         return NV_FALSE;
6500     }
6501 
6502     if (nvswitch_is_link_in_reset(device, link))
6503     {
6504         return NV_FALSE;
6505     }
6506 
6507     data = NVSWITCH_LINK_RD32_LR10(device, link_id,
6508                                    NVLDL, _NVLDL_TOP, _LINK_STATE);
6509 
6510     return (DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, data) !=
6511             NV_NVLDL_TOP_LINK_STATE_STATE_INIT);
6512 }
6513 
6514 static NvU32
6515 nvswitch_get_device_dma_width_lr10
6516 (
6517     nvswitch_device *device
6518 )
6519 {
6520     return DMA_ADDR_WIDTH_LR10;
6521 }
6522 
6523 NvU32
6524 nvswitch_get_link_ip_version_lr10
6525 (
6526     nvswitch_device *device,
6527     NvU32            link_id
6528 )
6529 {
6530     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
6531     NvU32 nvldl_instance;
6532 
6533     nvldl_instance = NVSWITCH_GET_LINK_ENG_INST(device, link_id, NVLDL);
6534     if (NVSWITCH_ENG_IS_VALID(device, NVLDL, nvldl_instance))
6535     {
6536         return chip_device->engNVLDL[nvldl_instance].version;
6537     }
6538     else
6539     {
6540         NVSWITCH_PRINT(device, ERROR,
6541             "%s: NVLink[0x%x] NVLDL instance invalid\n",
6542             __FUNCTION__, link_id);
6543         return 0;
6544     }
6545 }
6546 
6547 static NvlStatus
6548 nvswitch_test_soe_dma_lr10
6549 (
6550     nvswitch_device *device
6551 )
6552 {
6553     return soeTestDma_HAL(device, (PSOE)device->pSoe);
6554 }
6555 
/*
 * @brief Read the reserved NVLTLC throughput counters selected in
 *        counter_mask for one link.
 *
 * Each bit of counter_mask names one NVSWITCH_THROUGHPUT_COUNTERS_TYPE_*
 * counter; its 64-bit value is stored at counter_values[bit_index].
 * Entries for bits not set in the mask are left untouched.
 *
 * @param[in]  device          nvswitch device
 * @param[in]  link            link whose TLC counters are read
 * @param[in]  counter_mask    bitmask of counter types to collect
 * @param[out] counter_values  per-bit-index 64-bit counter results
 *
 * @returns NVL_SUCCESS, or -NVL_ERR_NOT_SUPPORTED if the mask contains
 *          an unknown counter type.
 */
static NvlStatus
_nvswitch_get_reserved_throughput_counters
(
    nvswitch_device *device,
    nvlink_link     *link,
    NvU16           counter_mask,
    NvU64           *counter_values
)
{
    NvU16 counter = 0;

    //
    // LR10 to use counters 0 & 2 for monitoring
    // (Same as GPU behavior)
    // Counter 0 counts data flits
    // Counter 2 counts all flits
    //
    FOR_EACH_INDEX_IN_MASK(16, counter, counter_mask)
    {
        NvU32 counter_type = NVBIT(counter);
        NvU64 data = 0;

        switch (counter_type)
        {
            // Each counter is a LO/HI register pair read as one 64-bit value.
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_TX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(0)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(0)));
                break;
            }
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_RX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(0)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(0)));
                break;
            }
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_RAW_TX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(2)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(2)));
                break;
            }
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_RAW_RX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(2)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(2)));
                break;
            }
            default:
            {
                return -NVL_ERR_NOT_SUPPORTED;
            }
        }
        counter_values[counter] = data;
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}
6627 
6628 NvlStatus
6629 nvswitch_ctrl_get_throughput_counters_lr10
6630 (
6631     nvswitch_device *device,
6632     NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *p
6633 )
6634 {
6635     NvlStatus status;
6636     nvlink_link *link;
6637     NvU16 i = 0;
6638 
6639     nvswitch_os_memset(p->counters, 0, sizeof(p->counters));
6640 
6641     FOR_EACH_INDEX_IN_MASK(64, i, p->linkMask)
6642     {
6643         link = nvswitch_get_link(device, i);
6644         if ((link == NULL) || (link->linkNumber >= NVSWITCH_MAX_PORTS) ||
6645             (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLTLC, link->linkNumber)))
6646         {
6647             continue;
6648         }
6649 
6650         status = _nvswitch_get_reserved_throughput_counters(device, link, p->counterMask,
6651                         p->counters[link->linkNumber].values);
6652         if (status != NVL_SUCCESS)
6653         {
6654             NVSWITCH_PRINT(device, ERROR,
6655                 "Failed to get reserved NVLINK throughput counters on link %d\n",
6656                 link->linkNumber);
6657             return status;
6658         }
6659     }
6660     FOR_EACH_INDEX_IN_MASK_END;
6661 
6662     return NVL_SUCCESS;
6663 }
6664 
6665 static NvBool
6666 nvswitch_is_soe_supported_lr10
6667 (
6668     nvswitch_device *device
6669 )
6670 {
6671     if (device->regkeys.soe_disable == NV_SWITCH_REGKEY_SOE_DISABLE_YES)
6672     {
6673         NVSWITCH_PRINT(device, INFO, "SOE is disabled via regkey.\n");
6674         return NV_FALSE;
6675     }
6676 
6677     return NV_TRUE;
6678 }
6679 
6680 NvBool
6681 nvswitch_is_inforom_supported_lr10
6682 (
6683     nvswitch_device *device
6684 )
6685 {
6686     if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
6687     {
6688         NVSWITCH_PRINT(device, INFO,
6689             "INFOROM is not supported on non-silicon platform\n");
6690         return NV_FALSE;
6691     }
6692 
6693     if (!nvswitch_is_soe_supported(device))
6694     {
6695         NVSWITCH_PRINT(device, INFO,
6696             "INFOROM is not supported since SOE is not supported\n");
6697         return NV_FALSE;
6698     }
6699 
6700     return NV_TRUE;
6701 }
6702 
6703 NvBool
6704 nvswitch_is_spi_supported_lr10
6705 (
6706     nvswitch_device *device
6707 )
6708 {
6709     if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
6710     {
6711         NVSWITCH_PRINT(device, INFO,
6712             "SPI is not supported on non-silicon platforms\n");
6713         return NV_FALSE;
6714     }
6715 
6716     if (!nvswitch_is_soe_supported(device))
6717     {
6718         NVSWITCH_PRINT(device, INFO,
6719             "SPI is not supported since SOE is not supported\n");
6720         return NV_FALSE;
6721     }
6722 
6723     return NV_TRUE;
6724 }
6725 
6726 NvBool
6727 nvswitch_is_bios_supported_lr10
6728 (
6729     nvswitch_device *device
6730 )
6731 {
6732     return nvswitch_is_spi_supported(device);
6733 }
6734 
6735 NvlStatus
6736 nvswitch_get_bios_size_lr10
6737 (
6738     nvswitch_device *device,
6739     NvU32 *pSize
6740 )
6741 {
6742     return nvswitch_bios_read_size(device, pSize);
6743 }
6744 
6745 NvBool
6746 nvswitch_is_smbpbi_supported_lr10
6747 (
6748     nvswitch_device *device
6749 )
6750 {
6751     if (IS_RTLSIM(device) || IS_FMODEL(device))
6752     {
6753         NVSWITCH_PRINT(device, INFO,
6754             "SMBPBI is not supported on RTLSIM/FMODEL platforms\n");
6755         return NV_FALSE;
6756     }
6757 
6758     if (!nvswitch_is_soe_supported(device))
6759     {
6760         NVSWITCH_PRINT(device, INFO,
6761             "SMBPBI is not supported since SOE is not supported\n");
6762         return NV_FALSE;
6763     }
6764 
6765     return NV_TRUE;
6766 }
6767 
6768 /*
6769  * @Brief : Additional setup needed after device initialization
6770  *
6771  * @Description :
6772  *
6773  * @param[in] device        a reference to the device to initialize
6774  */
6775 NvlStatus
6776 nvswitch_post_init_device_setup_lr10
6777 (
6778     nvswitch_device *device
6779 )
6780 {
6781     NvlStatus retval;
6782 
6783     if (device->regkeys.soe_dma_self_test ==
6784             NV_SWITCH_REGKEY_SOE_DMA_SELFTEST_DISABLE)
6785     {
6786         NVSWITCH_PRINT(device, INFO,
6787             "Skipping SOE DMA selftest as requested using regkey\n");
6788     }
6789     else if (IS_RTLSIM(device) || IS_FMODEL(device))
6790     {
6791         NVSWITCH_PRINT(device, SETUP,
6792             "Skipping DMA selftest on FMODEL/RTLSIM platforms\n");
6793     }
6794     else if (!nvswitch_is_soe_supported(device))
6795     {
6796         NVSWITCH_PRINT(device, SETUP,
6797             "Skipping DMA selftest since SOE is not supported\n");
6798     }
6799     else
6800     {
6801         retval = nvswitch_test_soe_dma_lr10(device);
6802         if (retval != NVL_SUCCESS)
6803         {
6804             return retval;
6805         }
6806     }
6807 
6808     if (nvswitch_is_inforom_supported(device))
6809     {
6810         nvswitch_inforom_post_init(device);
6811     }
6812     else
6813     {
6814         NVSWITCH_PRINT(device, SETUP, "Skipping INFOROM init\n");
6815     }
6816 
6817     nvswitch_soe_init_l2_state(device);
6818 
6819     return NVL_SUCCESS;
6820 }
6821 
6822 /*
6823  * @Brief : Additional setup needed after blacklisted device initialization
6824  *
6825  * @Description :
6826  *
6827  * @param[in] device        a reference to the device to initialize
6828  */
6829 void
6830 nvswitch_post_init_blacklist_device_setup_lr10
6831 (
6832     nvswitch_device *device
6833 )
6834 {
6835     NvlStatus status;
6836 
6837     if (nvswitch_is_inforom_supported(device))
6838     {
6839         nvswitch_inforom_post_init(device);
6840     }
6841 
6842     //
6843     // Initialize the driver state monitoring callback.
6844     // This is still needed for SOE to report correct driver state.
6845     //
6846     status = nvswitch_smbpbi_post_init(device);
6847     if (status != NVL_SUCCESS)
6848     {
6849         NVSWITCH_PRINT(device, ERROR, "Smbpbi post init failed, rc:%d\n",
6850                        status);
6851         return;
6852     }
6853 
6854     //
6855     // This internally will only flush if OMS value has changed
6856     //
6857     status = device->hal.nvswitch_oms_inforom_flush(device);
6858     if (status != NVL_SUCCESS)
6859     {
6860         NVSWITCH_PRINT(device, ERROR, "Flushing OMS failed, rc:%d\n",
6861                        status);
6862         return;
6863     }
6864 }
6865 
6866 void
6867 nvswitch_load_uuid_lr10
6868 (
6869     nvswitch_device *device
6870 )
6871 {
6872     NvU32 regData[4];
6873 
6874     //
6875     // Read 128-bit UUID from secure scratch registers which must be
6876     // populated by firmware.
6877     //
6878     regData[0] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_8);
6879     regData[1] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_9);
6880     regData[2] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_10);
6881     regData[3] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_11);
6882 
6883     nvswitch_os_memcpy(&device->uuid.uuid, (NvU8 *)regData, NV_UUID_LEN);
6884 }
6885 
6886 NvlStatus
6887 nvswitch_read_oob_blacklist_state_lr10
6888 (
6889     nvswitch_device *device
6890 )
6891 {
6892     NvU32 reg;
6893     NvBool is_oob_blacklist;
6894     NvlStatus status;
6895 
6896     if (device == NULL)
6897     {
6898         NVSWITCH_PRINT(device, ERROR, "%s: Called with invalid argument\n", __FUNCTION__);
6899         return -NVL_BAD_ARGS;
6900     }
6901 
6902     reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SCRATCH_COLD);
6903 
6904     // Check for uninitialized SCRATCH_COLD before declaring the device blacklisted
6905     if (reg == NV_NVLSAW_SCRATCH_COLD_DATA_INIT)
6906         is_oob_blacklist = NV_FALSE;
6907     else
6908         is_oob_blacklist = DRF_VAL(_NVLSAW, _SCRATCH_COLD, _OOB_BLACKLIST_DEVICE_REQUESTED, reg);
6909 
6910     status = nvswitch_inforom_oms_set_device_disable(device, is_oob_blacklist);
6911     if (status != NVL_SUCCESS)
6912     {
6913         NVSWITCH_PRINT(device, ERROR,
6914             "Failed to set device disable to %d, rc:%d\n",
6915             is_oob_blacklist, status);
6916     }
6917 
6918     if (is_oob_blacklist)
6919     {
6920         device->device_fabric_state = NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED;
6921         device->device_blacklist_reason = NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_OUT_OF_BAND;
6922     }
6923 
6924     return NVL_SUCCESS;
6925 }
6926 
6927 NvlStatus
6928 nvswitch_write_fabric_state_lr10
6929 (
6930     nvswitch_device *device
6931 )
6932 {
6933     NvU32 reg;
6934 
6935     if (device == NULL)
6936     {
6937         NVSWITCH_PRINT(device, ERROR, "%s: Called with invalid argument\n", __FUNCTION__);
6938         return -NVL_BAD_ARGS;
6939     }
6940 
6941     // bump the sequence number for each write
6942     device->fabric_state_sequence_number++;
6943 
6944     reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12);
6945 
6946     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_BLACKLIST_REASON,
6947                           device->device_blacklist_reason, reg);
6948     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_FABRIC_STATE,
6949                           device->device_fabric_state, reg);
6950     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DRIVER_FABRIC_STATE,
6951                           device->driver_fabric_state, reg);
6952     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _EVENT_MESSAGE_COUNT,
6953                           device->fabric_state_sequence_number, reg);
6954 
6955     NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _SW_SCRATCH_12, reg);
6956 
6957     return NVL_SUCCESS;
6958 }
6959 
6960 static NVSWITCH_ENGINE_DESCRIPTOR_TYPE *
6961 _nvswitch_get_eng_descriptor_lr10
6962 (
6963     nvswitch_device *device,
6964     NVSWITCH_ENGINE_ID eng_id
6965 )
6966 {
6967     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
6968     NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine = NULL;
6969 
6970     if (eng_id >= NVSWITCH_ENGINE_ID_SIZE)
6971     {
6972         NVSWITCH_PRINT(device, ERROR,
6973             "%s: Engine_ID 0x%x out of range 0..0x%x\n",
6974             __FUNCTION__,
6975             eng_id, NVSWITCH_ENGINE_ID_SIZE-1);
6976         return NULL;
6977     }
6978 
6979     engine = &(chip_device->io.common[eng_id]);
6980     NVSWITCH_ASSERT(eng_id == engine->eng_id);
6981 
6982     return engine;
6983 }
6984 
6985 NvU32
6986 nvswitch_get_eng_base_lr10
6987 (
6988     nvswitch_device *device,
6989     NVSWITCH_ENGINE_ID eng_id,
6990     NvU32 eng_bcast,
6991     NvU32 eng_instance
6992 )
6993 {
6994     NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine;
6995     NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID;
6996 
6997     engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
6998     if (engine == NULL)
6999     {
7000         NVSWITCH_PRINT(device, ERROR,
7001             "%s: ID 0x%x[%d] %s not found\n",
7002             __FUNCTION__,
7003             eng_id, eng_instance,
7004             (
7005                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7006                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7007                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7008                 "??"
7009             ));
7010         return NVSWITCH_BASE_ADDR_INVALID;
7011     }
7012 
7013     if ((eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) &&
7014         (eng_instance < engine->eng_count))
7015     {
7016         base_addr = engine->uc_addr[eng_instance];
7017     }
7018     else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST)
7019     {
7020         base_addr = engine->bc_addr;
7021     }
7022     else if ((eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) &&
7023         (eng_instance < engine->mc_addr_count))
7024     {
7025         base_addr = engine->mc_addr[eng_instance];
7026     }
7027     else
7028     {
7029         NVSWITCH_PRINT(device, ERROR,
7030             "%s: Unknown address space type 0x%x (not UC, BC, or MC)\n",
7031             __FUNCTION__,
7032             eng_bcast);
7033     }
7034 
7035     if (base_addr == NVSWITCH_BASE_ADDR_INVALID)
7036     {
7037         NVSWITCH_PRINT(device, ERROR,
7038             "%s: ID 0x%x[%d] %s invalid address\n",
7039             __FUNCTION__,
7040             eng_id, eng_instance,
7041             (
7042                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7043                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7044                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7045                 "??"
7046             ));
7047     }
7048 
7049     return base_addr;
7050 }
7051 
7052 NvU32
7053 nvswitch_get_eng_count_lr10
7054 (
7055     nvswitch_device *device,
7056     NVSWITCH_ENGINE_ID eng_id,
7057     NvU32 eng_bcast
7058 )
7059 {
7060     NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine;
7061     NvU32 eng_count = 0;
7062 
7063     engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
7064     if (engine == NULL)
7065     {
7066         NVSWITCH_PRINT(device, ERROR,
7067             "%s: ID 0x%x %s not found\n",
7068             __FUNCTION__,
7069             eng_id,
7070             (
7071                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7072                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7073                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7074                 "??"
7075             ));
7076         return 0;
7077     }
7078 
7079     if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST)
7080     {
7081         eng_count = engine->eng_count;
7082     }
7083     else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST)
7084     {
7085         if (engine->bc_addr == NVSWITCH_BASE_ADDR_INVALID)
7086         {
7087             eng_count = 0;
7088         }
7089         else
7090         {
7091             eng_count = 1;
7092         }
7093     }
7094     else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST)
7095     {
7096         eng_count = engine->mc_addr_count;
7097     }
7098     else
7099     {
7100         NVSWITCH_PRINT(device, ERROR,
7101             "%s: Unknown address space type 0x%x (not UC, BC, or MC)\n",
7102             __FUNCTION__,
7103             eng_bcast);
7104     }
7105 
7106     return eng_count;
7107 }
7108 
7109 NvU32
7110 nvswitch_eng_rd_lr10
7111 (
7112     nvswitch_device *device,
7113     NVSWITCH_ENGINE_ID eng_id,
7114     NvU32 eng_bcast,
7115     NvU32 eng_instance,
7116     NvU32 offset
7117 )
7118 {
7119     NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID;
7120     NvU32 data;
7121 
7122     base_addr = nvswitch_get_eng_base_lr10(device, eng_id, eng_bcast, eng_instance);
7123     if (base_addr == NVSWITCH_BASE_ADDR_INVALID)
7124     {
7125         NVSWITCH_PRINT(device, ERROR,
7126             "%s: ID 0x%x[%d] %s invalid address\n",
7127             __FUNCTION__,
7128             eng_id, eng_instance,
7129             (
7130                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7131                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7132                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7133                 "??"
7134             ));
7135         NVSWITCH_ASSERT(base_addr != NVSWITCH_BASE_ADDR_INVALID);
7136         return 0xBADFBADF;
7137     }
7138 
7139     data = nvswitch_reg_read_32(device, base_addr + offset);
7140 
7141 #if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7142     {
7143         NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
7144 
7145         NVSWITCH_PRINT(device, MMIO,
7146             "%s: ENG_RD %s(0x%x)[%d] @0x%08x+0x%06x = 0x%08x\n",
7147             __FUNCTION__,
7148             engine->eng_name, engine->eng_id,
7149             eng_instance,
7150             base_addr, offset,
7151             data);
7152     }
7153 #endif  //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7154 
7155     return data;
7156 }
7157 
7158 void
7159 nvswitch_eng_wr_lr10
7160 (
7161     nvswitch_device *device,
7162     NVSWITCH_ENGINE_ID eng_id,
7163     NvU32 eng_bcast,
7164     NvU32 eng_instance,
7165     NvU32 offset,
7166     NvU32 data
7167 )
7168 {
7169     NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID;
7170 
7171     base_addr = nvswitch_get_eng_base_lr10(device, eng_id, eng_bcast, eng_instance);
7172     if (base_addr == NVSWITCH_BASE_ADDR_INVALID)
7173     {
7174         NVSWITCH_PRINT(device, ERROR,
7175             "%s: ID 0x%x[%d] %s invalid address\n",
7176             __FUNCTION__,
7177             eng_id, eng_instance,
7178             (
7179                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7180                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7181                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7182                 "??"
7183             ));
7184         NVSWITCH_ASSERT(base_addr != NVSWITCH_BASE_ADDR_INVALID);
7185         return;
7186     }
7187 
7188     nvswitch_reg_write_32(device, base_addr + offset,  data);
7189 
7190 #if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7191     {
7192         NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
7193 
7194         NVSWITCH_PRINT(device, MMIO,
7195             "%s: ENG_WR %s(0x%x)[%d] @0x%08x+0x%06x = 0x%08x\n",
7196             __FUNCTION__,
7197             engine->eng_name, engine->eng_id,
7198             eng_instance,
7199             base_addr, offset,
7200             data);
7201     }
7202 #endif  //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7203 }
7204 
7205 NvU32
7206 nvswitch_get_link_eng_inst_lr10
7207 (
7208     nvswitch_device *device,
7209     NvU32 link_id,
7210     NVSWITCH_ENGINE_ID eng_id
7211 )
7212 {
7213     NvU32   eng_instance = NVSWITCH_ENGINE_INSTANCE_INVALID;
7214 
7215     if (link_id >= NVSWITCH_LINK_COUNT(device))
7216     {
7217         NVSWITCH_PRINT(device, ERROR,
7218             "%s: link ID 0x%x out-of-range [0x0..0x%x]\n",
7219             __FUNCTION__,
7220             link_id, NVSWITCH_LINK_COUNT(device)-1);
7221         return NVSWITCH_ENGINE_INSTANCE_INVALID;
7222     }
7223 
7224     switch (eng_id)
7225     {
7226         case NVSWITCH_ENGINE_ID_NPG:
7227             eng_instance = link_id / NVSWITCH_LINKS_PER_NPG;
7228             break;
7229         case NVSWITCH_ENGINE_ID_NVLIPT:
7230             eng_instance = link_id / NVSWITCH_LINKS_PER_NVLIPT;
7231             break;
7232         case NVSWITCH_ENGINE_ID_NVLW:
7233         case NVSWITCH_ENGINE_ID_NVLW_PERFMON:
7234             eng_instance = link_id / NVSWITCH_LINKS_PER_NVLW;
7235             break;
7236         case NVSWITCH_ENGINE_ID_MINION:
7237             eng_instance = link_id / NVSWITCH_LINKS_PER_MINION;
7238             break;
7239         case NVSWITCH_ENGINE_ID_NPORT:
7240         case NVSWITCH_ENGINE_ID_NVLTLC:
7241         case NVSWITCH_ENGINE_ID_NVLDL:
7242         case NVSWITCH_ENGINE_ID_NVLIPT_LNK:
7243         case NVSWITCH_ENGINE_ID_NPORT_PERFMON:
7244             eng_instance = link_id;
7245             break;
7246         default:
7247             NVSWITCH_PRINT(device, ERROR,
7248                 "%s: link ID 0x%x has no association with EngID 0x%x\n",
7249                 __FUNCTION__,
7250                 link_id, eng_id);
7251             eng_instance = NVSWITCH_ENGINE_INSTANCE_INVALID;
7252             break;
7253     }
7254 
7255     return eng_instance;
7256 }
7257 
/*
 * @brief Report the NVLink version for capability queries (3.0 on LR10).
 */
NvU32
nvswitch_get_caps_nvlink_version_lr10
(
    nvswitch_device *device
)
{
    // Compile-time check that the STATUS and CAPS 3.0 encodings agree,
    // since callers use them interchangeably.
    ct_assert(NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_3_0 ==
                NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0);
    return NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0;
}
7268 
7269 NVSWITCH_BIOS_NVLINK_CONFIG *
7270 nvswitch_get_bios_nvlink_config_lr10
7271 (
7272     nvswitch_device *device
7273 )
7274 {
7275     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
7276 
7277     return (chip_device != NULL) ? &chip_device->bios_config : NULL;
7278 }
7279 
7280 /*
7281  * CTRL_NVSWITCH_SET_RESIDENCY_BINS
7282  */
/*
 * @brief Stub: residency bins are not implemented on LR10; logs an error
 *        and reports not-supported.
 */
static NvlStatus
nvswitch_ctrl_set_residency_bins_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_RESIDENCY_BINS *p
)
{
    NVSWITCH_PRINT(device, ERROR,
        "SET_RESIDENCY_BINS should not be called on LR10\n");
    return -NVL_ERR_NOT_SUPPORTED;
}
7294 
7295 /*
7296  * CTRL_NVSWITCH_GET_RESIDENCY_BINS
7297  */
/*
 * @brief Stub: residency bins are not implemented on LR10; logs an error
 *        and reports not-supported.
 */
static NvlStatus
nvswitch_ctrl_get_residency_bins_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_RESIDENCY_BINS *p
)
{
    NVSWITCH_PRINT(device, ERROR,
        "GET_RESIDENCY_BINS should not be called on LR10\n");
    return -NVL_ERR_NOT_SUPPORTED;
}
7309 
7310 /*
7311  * CTRL_NVSWITCH_GET_RB_STALL_BUSY
7312  */
/*
 * @brief Stub: RB stall/busy counters are not implemented on LR10; logs
 *        an error and reports not-supported.
 */
static NvlStatus
nvswitch_ctrl_get_rb_stall_busy_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_RB_STALL_BUSY *p
)
{
    NVSWITCH_PRINT(device, ERROR,
        "GET_RB_STALL_BUSY should not be called on LR10\n");
    return -NVL_ERR_NOT_SUPPORTED;
}
7324 
7325 /*
7326  * CTRL_NVSWITCH_GET_MULTICAST_ID_ERROR_VECTOR
7327  */
/*
 * @brief Stub: multicast ID error vectors are not implemented on LR10;
 *        logs an error and reports not-supported.
 */
static NvlStatus
nvswitch_ctrl_get_multicast_id_error_vector_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_MULTICAST_ID_ERROR_VECTOR *p
)
{
    NVSWITCH_PRINT(device, ERROR,
        "GET_MULTICAST_ID_ERROR_VECTOR should not be called on LR10\n");
    return -NVL_ERR_NOT_SUPPORTED;
}
7339 
7340 /*
7341  * CTRL_NVSWITCH_CLEAR_MULTICAST_ID_ERROR_VECTOR
7342  */
7343 static NvlStatus
7344 nvswitch_ctrl_clear_multicast_id_error_vector_lr10
7345 (
7346     nvswitch_device *device,
7347     NVSWITCH_CLEAR_MULTICAST_ID_ERROR_VECTOR *p
7348 )
7349 {
7350     NVSWITCH_PRINT(device, ERROR,
7351         "CLEAR_MULTICAST_ID_ERROR_VECTOR should not be called on LR10\n");
7352     return -NVL_ERR_NOT_SUPPORTED;
7353 }
7354 
7355 void
7356 nvswitch_send_inband_nack_lr10
7357 (
7358     nvswitch_device *device,
7359     NvU32 *msghdr,
7360     NvU32  linkId
7361 )
7362 {
7363     return;
7364 }
7365 
7366 NvU32
7367 nvswitch_get_max_persistent_message_count_lr10
7368 (
7369     nvswitch_device *device
7370 )
7371 {
7372     return 0;
7373 }
7374 
7375 /*
7376  * CTRL_NVSWITCH_INBAND_SEND_DATA
7377  */
7378 NvlStatus
7379 nvswitch_ctrl_inband_send_data_lr10
7380 (
7381     nvswitch_device *device,
7382     NVSWITCH_INBAND_SEND_DATA_PARAMS *p
7383 )
7384 {
7385     return -NVL_ERR_NOT_SUPPORTED;
7386 }
7387 
7388 /*
7389  * CTRL_NVSWITCH_INBAND_RECEIVE_DATA
7390  */
7391 NvlStatus
7392 nvswitch_ctrl_inband_read_data_lr10
7393 (
7394     nvswitch_device *device,
7395     NVSWITCH_INBAND_READ_DATA_PARAMS *p
7396 )
7397 {
7398     return -NVL_ERR_NOT_SUPPORTED;
7399 }
7400 
7401 /*
7402  * CTRL_NVSWITCH_GET_BOARD_PART_NUMBER
7403  */
7404 NvlStatus
7405 nvswitch_ctrl_get_board_part_number_lr10
7406 (
7407     nvswitch_device *device,
7408     NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p
7409 )
7410 {
7411     struct inforom *pInforom = device->pInforom;
7412     INFOROM_OBD_OBJECT_V1_XX *pOBDObj;
7413     int byteIdx;
7414 
7415     if (pInforom == NULL)
7416     {
7417         return -NVL_ERR_NOT_SUPPORTED;
7418     }
7419 
7420     if (!pInforom->OBD.bValid)
7421     {
7422         NVSWITCH_PRINT(device, ERROR, "OBD data is not available\n");
7423         return -NVL_ERR_GENERIC;
7424     }
7425 
7426     pOBDObj = &pInforom->OBD.object.v1;
7427 
7428     if (sizeof(p->data) != sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008))
7429     {
7430         NVSWITCH_PRINT(device, ERROR,
7431                        "board part number available size %lu is not same as the request size %lu\n",
7432                        sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008), sizeof(p->data));
7433         return -NVL_ERR_GENERIC;
7434     }
7435 
7436     nvswitch_os_memset(p, 0, sizeof(NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR));
7437 
7438     /* Copy board type data */
7439     for (byteIdx = 0; byteIdx < NVSWITCH_BOARD_PART_NUMBER_SIZE_IN_BYTES; byteIdx++)
7440     {
7441         p->data[byteIdx] =(NvU8)(pOBDObj->productPartNumber[byteIdx] & 0xFF);
7442     }
7443 
7444     return NVL_SUCCESS;
7445 }
7446 
7447 /*
7448 * @brief: This function retrieves the NVLIPT public ID for a given global link idx
7449 * @params[in]  device        reference to current nvswitch device
7450 * @params[in]  linkId        link to retrieve NVLIPT public ID from
7451 * @params[out] publicId      Public ID of NVLIPT owning linkId
7452 */
7453 NvlStatus nvswitch_get_link_public_id_lr10
7454 (
7455     nvswitch_device *device,
7456     NvU32 linkId,
7457     NvU32 *publicId
7458 )
7459 {
7460     if (!device->hal.nvswitch_is_link_valid(device, linkId) ||
7461         (publicId == NULL))
7462     {
7463         return -NVL_BAD_ARGS;
7464     }
7465 
7466     *publicId = NVSWITCH_NVLIPT_GET_PUBLIC_ID_LR10(linkId);
7467 
7468 
7469     return (NVSWITCH_ENG_VALID_LR10(device, NVLIPT, *publicId)) ?
7470                 NVL_SUCCESS : -NVL_BAD_ARGS;
7471 }
7472 
7473 /*
7474 * @brief: This function retrieves the internal link idx for a given global link idx
7475 * @params[in]  device        reference to current nvswitch device
7476 * @params[in]  linkId        link to retrieve NVLIPT public ID from
7477 * @params[out] localLinkIdx  Internal link index of linkId
7478 */
7479 NvlStatus nvswitch_get_link_local_idx_lr10
7480 (
7481     nvswitch_device *device,
7482     NvU32 linkId,
7483     NvU32 *localLinkIdx
7484 )
7485 {
7486     if (!device->hal.nvswitch_is_link_valid(device, linkId) ||
7487         (localLinkIdx == NULL))
7488     {
7489         return -NVL_BAD_ARGS;
7490     }
7491 
7492     *localLinkIdx = NVSWITCH_NVLIPT_GET_LOCAL_LINK_ID_LR10(linkId);
7493 
7494     return NVL_SUCCESS;
7495 }
7496 
7497 NvlStatus nvswitch_set_training_error_info_lr10
7498 (
7499     nvswitch_device *device,
7500     NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS *pLinkTrainingErrorInfoParams
7501 )
7502 {
7503     NVSWITCH_LINK_TRAINING_ERROR_INFO linkTrainingErrorInfo;
7504     NVSWITCH_LINK_RUNTIME_ERROR_INFO linkRuntimeErrorInfo;
7505 
7506     linkTrainingErrorInfo.isValid = NV_TRUE;
7507     linkTrainingErrorInfo.attemptedTrainingMask0 =
7508         pLinkTrainingErrorInfoParams->attemptedTrainingMask0;
7509     linkTrainingErrorInfo.trainingErrorMask0 =
7510         pLinkTrainingErrorInfoParams->trainingErrorMask0;
7511 
7512     linkRuntimeErrorInfo.isValid = NV_FALSE;
7513     linkRuntimeErrorInfo.mask0   = 0;
7514 
7515     return nvswitch_smbpbi_set_link_error_info(device,
7516                                                &linkTrainingErrorInfo,
7517                                                &linkRuntimeErrorInfo);
7518 }
7519 
7520 NvlStatus nvswitch_ctrl_get_fatal_error_scope_lr10
7521 (
7522     nvswitch_device *device,
7523     NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS *pParams
7524 )
7525 {
7526     NvU32 linkId;
7527     NvU32 reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12);
7528     pParams->device = FLD_TEST_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_RESET_REQUIRED,
7529                                        1, reg);
7530 
7531     for (linkId = 0; linkId < NVSWITCH_MAX_PORTS; linkId++)
7532     {
7533         if (!nvswitch_is_link_valid(device, linkId))
7534         {
7535             pParams->port[linkId] = NV_FALSE;
7536             continue;
7537         }
7538 
7539         reg = NVSWITCH_LINK_RD32_LR10(device, linkId, NPORT, _NPORT, _SCRATCH_WARM);
7540         pParams->port[linkId] = FLD_TEST_DRF_NUM(_NPORT, _SCRATCH_WARM,
7541                                                  _PORT_RESET_REQUIRED, 1, reg);
7542     }
7543 
7544     return NVL_SUCCESS;
7545 }
7546 
7547 NvlStatus nvswitch_ctrl_set_mc_rid_table_lr10
7548 (
7549     nvswitch_device *device,
7550     NVSWITCH_SET_MC_RID_TABLE_PARAMS *p
7551 )
7552 {
7553     return -NVL_ERR_NOT_SUPPORTED;
7554 }
7555 
7556 NvlStatus nvswitch_ctrl_get_mc_rid_table_lr10
7557 (
7558     nvswitch_device *device,
7559     NVSWITCH_GET_MC_RID_TABLE_PARAMS *p
7560 )
7561 {
7562     return -NVL_ERR_NOT_SUPPORTED;
7563 }
7564 
7565 void nvswitch_init_scratch_lr10
7566 (
7567     nvswitch_device *device
7568 )
7569 {
7570     NvU32 linkId;
7571     NvU32 reg;
7572 
7573     for (linkId = 0; linkId < nvswitch_get_num_links(device); linkId++)
7574     {
7575         if (!nvswitch_is_link_valid(device, linkId))
7576         {
7577             continue;
7578         }
7579 
7580         reg = NVSWITCH_LINK_RD32(device, linkId, NPORT, _NPORT, _SCRATCH_WARM);
7581         if (reg == NV_NPORT_SCRATCH_WARM_DATA_INIT)
7582         {
7583             NVSWITCH_LINK_WR32(device, linkId, NPORT, _NPORT, _SCRATCH_WARM, 0);
7584         }
7585     }
7586 }
7587 
7588 NvlStatus
7589 nvswitch_launch_ALI_lr10
7590 (
7591     nvswitch_device *device
7592 )
7593 {
7594     return -NVL_ERR_NOT_SUPPORTED;
7595 }
7596 
7597 NvlStatus
7598 nvswitch_set_training_mode_lr10
7599 (
7600     nvswitch_device *device
7601 )
7602 {
7603     return NVL_SUCCESS;
7604 }
7605 
7606 NvlStatus
7607 nvswitch_parse_bios_image_lr10
7608 (
7609     nvswitch_device *device
7610 )
7611 {
7612     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config;
7613     NV_STATUS status = NV_OK;
7614 
7615     // check if spi is supported
7616     if (!nvswitch_is_bios_supported(device))
7617     {
7618         NVSWITCH_PRINT(device, ERROR,
7619                 "%s: BIOS is not supported\n",
7620                 __FUNCTION__);
7621         return -NVL_ERR_NOT_SUPPORTED;
7622     }
7623 
7624     bios_config = nvswitch_get_bios_nvlink_config(device);
7625 
7626     // Parse and retrieve the VBIOS info
7627     status = _nvswitch_setup_link_vbios_overrides(device, bios_config);
7628     if ((status != NV_OK) && device->pSoe)
7629     {
7630         //To enable LS10 bringup (VBIOS is not ready and SOE is disabled), fail the device init only when SOE is enabled and vbios overrides has failed
7631         NVSWITCH_PRINT(device, ERROR,
7632                 "%s: error=0x%x\n",
7633                 __FUNCTION__, status);
7634 
7635         return -NVL_ERR_GENERIC;
7636     }
7637 
7638     return NVL_SUCCESS;
7639 }
7640 
7641 NvlStatus
7642 nvswitch_ctrl_get_nvlink_lp_counters_lr10
7643 (
7644     nvswitch_device *device,
7645     NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS *params
7646 )
7647 {
7648     return -NVL_ERR_NOT_SUPPORTED;
7649 }
7650 
7651 NvlStatus
7652 nvswitch_ctrl_get_sw_info_lr10
7653 (
7654     nvswitch_device *device,
7655     NVSWITCH_GET_SW_INFO_PARAMS *p
7656 )
7657 {
7658     NvlStatus retval = NVL_SUCCESS;
7659     NvU32 i;
7660 
7661     if (p->count > NVSWITCH_GET_SW_INFO_COUNT_MAX)
7662     {
7663         NVSWITCH_PRINT(device, ERROR,
7664             "%s: Invalid args\n",
7665             __FUNCTION__);
7666         return -NVL_BAD_ARGS;
7667     }
7668 
7669     nvswitch_os_memset(p->info, 0, sizeof(NvU32)*NVSWITCH_GET_SW_INFO_COUNT_MAX);
7670 
7671     for (i = 0; i < p->count; i++)
7672     {
7673         switch (p->index[i])
7674         {
7675             case NVSWITCH_GET_SW_INFO_INDEX_INFOROM_NVL_SUPPORTED:
7676                 p->info[i] = NV_TRUE;
7677                 break;
7678             case NVSWITCH_GET_SW_INFO_INDEX_INFOROM_BBX_SUPPORTED:
7679                 p->info[i] = (NvU32)_nvswitch_inforom_bbx_supported(device);
7680                 break;
7681             default:
7682                 NVSWITCH_PRINT(device, ERROR,
7683                     "%s: Undefined NVSWITCH_GET_SW_INFO_INDEX 0x%x\n",
7684                     __FUNCTION__,
7685                     p->index[i]);
7686                 retval = -NVL_BAD_ARGS;
7687                 break;
7688         }
7689     }
7690 
7691     return retval;
7692 }
7693 
/*
 * @brief Service NVSWITCH_NVLINK_GET_ERR_INFO: collect per-link DL error
 *        state for every enabled link.
 *
 * TL error fields are reported as zero (no TL error registers on this chip;
 * see the warnings below). DL speed-status is read from the TX/RX SLSM
 * registers, and the short-error-rate interrupt bit is sampled and, when
 * set, cleared by a write-one-to-clear.
 */
NvlStatus
nvswitch_ctrl_get_err_info_lr10
(
    nvswitch_device *device,
    NVSWITCH_NVLINK_GET_ERR_INFO_PARAMS *ret
)
{
    nvlink_link *link;
    NvU32 data;
    NvU8 i;

     ret->linkMask = nvswitch_get_enabled_link_mask(device);

    FOR_EACH_INDEX_IN_MASK(64, i, ret->linkMask)
    {
        link = nvswitch_get_link(device, i);

        // Skip links with no DL engine or indices beyond the result array.
        if ((link == NULL) ||
            !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
            (i >= NVSWITCH_NVLINK_MAX_LINKS))
        {
            continue;
        }

        // TODO NVidia TL not supported
        NVSWITCH_PRINT(device, WARN,
            "%s WARNING: Nvidia %s register %s does not exist!\n",
            __FUNCTION__, "NVLTL", "NV_NVLTL_TL_ERRLOG_REG");

        NVSWITCH_PRINT(device, WARN,
            "%s WARNING: Nvidia %s register %s does not exist!\n",
            __FUNCTION__, "NVLTL", "NV_NVLTL_TL_INTEN_REG");

        // No TL registers to read; report zeros for the TL fields.
        ret->linkErrInfo[i].TLErrlog = 0x0;
        ret->linkErrInfo[i].TLIntrEn = 0x0;

        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _SLSM_STATUS_TX);
        ret->linkErrInfo[i].DLSpeedStatusTx =
            DRF_VAL(_NVLDL_TX, _SLSM_STATUS_TX, _PRIMARY_STATE, data);

        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _SLSM_STATUS_RX);
        ret->linkErrInfo[i].DLSpeedStatusRx =
            DRF_VAL(_NVLDL_RX, _SLSM_STATUS_RX, _PRIMARY_STATE, data);

        // Normalize the interrupt field to a 0/1 boolean.
        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _INTR);
        ret->linkErrInfo[i].bExcessErrorDL =
            !!DRF_VAL(_NVLDL_TOP, _INTR, _RX_SHORT_ERROR_RATE, data);

        // Acknowledge the pending short-error-rate interrupt
        // (write-one-to-clear) so the condition can re-arm.
        if (ret->linkErrInfo[i].bExcessErrorDL)
        {
            NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _INTR,
                DRF_NUM(_NVLDL_TOP, _INTR, _RX_SHORT_ERROR_RATE, 0x1));
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}
7752 
7753 static NvlStatus
7754 nvswitch_ctrl_clear_counters_lr10
7755 (
7756     nvswitch_device *device,
7757     NVSWITCH_NVLINK_CLEAR_COUNTERS_PARAMS *ret
7758 )
7759 {
7760     nvlink_link *link;
7761     NvU8 i;
7762     NvU32 counterMask;
7763     NvlStatus status = NVL_SUCCESS;
7764 
7765     counterMask = ret->counterMask;
7766 
7767     // Common usage allows one of these to stand for all of them
7768     if ((counterMask) & ( NVSWITCH_NVLINK_COUNTER_TL_TX0
7769                         | NVSWITCH_NVLINK_COUNTER_TL_TX1
7770                         | NVSWITCH_NVLINK_COUNTER_TL_RX0
7771                         | NVSWITCH_NVLINK_COUNTER_TL_RX1
7772                         ))
7773     {
7774         counterMask |= ( NVSWITCH_NVLINK_COUNTER_TL_TX0
7775                        | NVSWITCH_NVLINK_COUNTER_TL_TX1
7776                        | NVSWITCH_NVLINK_COUNTER_TL_RX0
7777                        | NVSWITCH_NVLINK_COUNTER_TL_RX1
7778                        );
7779     }
7780 
7781     // Common usage allows one of these to stand for all of them
7782     if ((counterMask) & ( NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT
7783                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0
7784                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1
7785                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2
7786                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3
7787                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4
7788                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5
7789                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6
7790                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7
7791                         | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY
7792                         | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY
7793                         ))
7794     {
7795         counterMask |= ( NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT
7796                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0
7797                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1
7798                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2
7799                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3
7800                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4
7801                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5
7802                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6
7803                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7
7804                        | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY
7805                        | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY
7806                        );
7807     }
7808 
7809     FOR_EACH_INDEX_IN_MASK(64, i, ret->linkMask)
7810     {
7811         link = nvswitch_get_link(device, i);
7812         if (link == NULL)
7813         {
7814             continue;
7815         }
7816 
7817         if (NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLTLC, link->linkNumber))
7818         {
7819             nvswitch_ctrl_clear_throughput_counters_lr10(device, link, counterMask);
7820         }
7821         if (NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber))
7822         {
7823             status = nvswitch_ctrl_clear_dl_error_counters_lr10(device, link, counterMask);
7824             // Return early with failure on clearing through minion
7825             if (status != NVL_SUCCESS)
7826             {
7827                 NVSWITCH_PRINT(device, ERROR,
7828                     "%s: Failure on clearing link counter mask 0x%x on link %d\n",
7829                     __FUNCTION__, counterMask, link->linkNumber);
7830                 break;
7831             }
7832         }
7833     }
7834     FOR_EACH_INDEX_IN_MASK_END;
7835 
7836     return status;
7837 }
7838 
7839 NvlStatus
7840 nvswitch_ctrl_set_nvlink_error_threshold_lr10
7841 (
7842     nvswitch_device *device,
7843     NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS *ret
7844 )
7845 {
7846     return -NVL_ERR_NOT_SUPPORTED;
7847 }
7848 
7849 static NvlStatus
7850 nvswitch_ctrl_get_nvlink_error_threshold_lr10
7851 (
7852     nvswitch_device *device,
7853     NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS *ret
7854 )
7855 {
7856     return -NVL_ERR_NOT_SUPPORTED;
7857 }
7858 
7859 //
7860 // This function auto creates the lr10 HAL connectivity from the NVSWITCH_INIT_HAL
7861 // macro in haldef_nvswitch.h
7862 //
7863 // Note: All hal fns must be implemented for each chip.
7864 //       There is no automatic stubbing here.
7865 //
7866 void nvswitch_setup_hal_lr10(nvswitch_device *device)
7867 {
7868     device->chip_arch = NVSWITCH_GET_INFO_INDEX_ARCH_LR10;
7869 
7870     {
7871         device->chip_impl = NVSWITCH_GET_INFO_INDEX_IMPL_LR10;
7872     }
7873 
7874     NVSWITCH_INIT_HAL(device, lr10);
7875     NVSWITCH_INIT_HAL_LS10(device, lr10);
7876 }
7877