1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "common_nvswitch.h"
25 #include "error_nvswitch.h"
26 #include "regkey_nvswitch.h"
27 #include "haldef_nvswitch.h"
28 #include "lr10/lr10.h"
29 #include "lr10/clock_lr10.h"
30 #include "lr10/minion_lr10.h"
31 #include "lr10/soe_lr10.h"
32 #include "lr10/pmgr_lr10.h"
33 #include "lr10/therm_lr10.h"
34 #include "lr10/inforom_lr10.h"
35 #include "lr10/smbpbi_lr10.h"
36 #include "flcn/flcnable_nvswitch.h"
37 #include "soe/soe_nvswitch.h"
38 
39 #include "nvswitch/lr10/dev_nvs_top.h"
40 #include "nvswitch/lr10/dev_pri_ringmaster.h"
41 #include "nvswitch/lr10/dev_pri_ringstation_sys.h"
42 #include "nvswitch/lr10/dev_nvlsaw_ip.h"
43 #include "nvswitch/lr10/dev_nvlsaw_ip_addendum.h"
44 #include "nvswitch/lr10/dev_nvs_master.h"
45 #include "nvswitch/lr10/dev_nvltlc_ip.h"
46 #include "nvswitch/lr10/dev_nvldl_ip.h"
47 #include "nvswitch/lr10/dev_nvlipt_lnk_ip.h"
48 #include "nvswitch/lr10/dev_nvlctrl_ip.h"
49 #include "nvswitch/lr10/dev_npg_ip.h"
50 #include "nvswitch/lr10/dev_npgperf_ip.h"
51 #include "nvswitch/lr10/dev_nport_ip.h"
52 #include "nvswitch/lr10/dev_ingress_ip.h"
53 #include "nvswitch/lr10/dev_tstate_ip.h"
54 #include "nvswitch/lr10/dev_egress_ip.h"
55 #include "nvswitch/lr10/dev_route_ip.h"
56 #include "nvswitch/lr10/dev_therm.h"
57 #include "nvswitch/lr10/dev_soe_ip.h"
58 #include "nvswitch/lr10/dev_route_ip_addendum.h"
59 #include "nvswitch/lr10/dev_minion_ip.h"
60 #include "nvswitch/lr10/dev_minion_ip_addendum.h"
61 #include "nvswitch/lr10/dev_nport_ip_addendum.h"
62 #include "nvswitch/lr10/dev_nxbar_tile_ip.h"
63 #include "nvswitch/lr10/dev_nxbar_tc_global_ip.h"
64 #include "nvswitch/lr10/dev_sourcetrack_ip.h"
65 
66 #include "oob/smbpbi.h"
67 
68 #define DMA_ADDR_WIDTH_LR10     64
69 #define ROUTE_GANG_TABLE_SIZE (1 << DRF_SIZE(NV_ROUTE_REG_TABLE_ADDRESS_INDEX))
70 
//
// Take all valid links out of reset and poll until HW reports each reset
// de-asserted.  Normally MINION does this during INITPHASE1; this path is
// used only for forced-config bring-up (see caller).
//
static void
_nvswitch_deassert_link_resets_lr10
(
    nvswitch_device *device
)
{
    NvU32 val, i;
    NVSWITCH_TIMEOUT timeout;
    NvBool           keepPolling;

    NVSWITCH_PRINT(device, WARN,
        "%s: NVSwitch Driver is taking the links out of reset. This should only happen during forced config.\n",
        __FUNCTION__);

    // Pass 1: write the DEASSERT request to every valid NVLIPT_LNK engine.
    for (i = 0; i < NVSWITCH_LINK_COUNT(device); i++)
    {
        if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLIPT_LNK, i)) continue;

        val = NVSWITCH_LINK_RD32_LR10(device, i,
                NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET);
        val = FLD_SET_DRF_NUM(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET, _LINK_RESET,
                          NV_NVLIPT_LNK_RESET_RSTSEQ_LINK_RESET_LINK_RESET_DEASSERT, val);

        NVSWITCH_LINK_WR32_LR10(device, i,
                NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET, val);
    }

    // Pass 2: wait (up to 25ms per link) for each reset to report DEASSERTED.
    for (i = 0; i < NVSWITCH_LINK_COUNT(device); i++)
    {
        if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLIPT_LNK, i)) continue;

        // Poll for _RESET_STATUS == _DEASSERTED
        nvswitch_timeout_create(25*NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);

        do
        {
            keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;

            val = NVSWITCH_LINK_RD32_LR10(device, i,
                    NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET);
            if (FLD_TEST_DRF(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET,
                        _LINK_RESET_STATUS, _DEASSERTED, val))
            {
                break;
            }

            nvswitch_os_sleep(1);
        }
        while (keepPolling);

        // A timed-out link is logged but not treated as fatal; bring-up
        // continues with the remaining links.
        if (!FLD_TEST_DRF(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET,
                    _LINK_RESET_STATUS, _DEASSERTED, val))
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Timeout waiting for link %d_LINK_RESET_STATUS == _DEASSERTED\n",
                __FUNCTION__, i);
                // Bug 2974064: Review this timeout handling (fall through)
        }
    }
}
131 
//
// Train a single link to ACTIVE via the forced-config DL overrides
// (AUTO_HWCFG then AUTO_NVHS), then set BUFFER_READY on the TLC if the
// link reaches ACTIVE.
//
static void
_nvswitch_train_forced_config_link_lr10
(
    nvswitch_device *device,
    NvU32            linkId
)
{
    NvU32 data, i;
    nvlink_link *link;

    link = nvswitch_get_link(device, linkId);

    // Silently skip missing, invalid, or out-of-range links.
    if ((link == NULL) ||
        !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
        (linkId >= NVSWITCH_NVLINK_MAX_LINKS))
    {
        return;
    }

    // Enable automatic HW config to bring the link toward SAFE.
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_TEST, _AUTO_HWCFG, _ENABLE, data);
    NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST, data);

    // Add some delay to let the sim/emu go to SAFE
    NVSWITCH_NSEC_DELAY(400 * NVSWITCH_INTERVAL_1USEC_IN_NS);

    // Enable automatic transition to NVHS (high speed).
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_TEST, _AUTO_NVHS, _ENABLE, data);
    NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST, data);

    // Add some delay to let the sim/emu go to HS
    NVSWITCH_NSEC_DELAY(400 * NVSWITCH_INTERVAL_1USEC_IN_NS);

    // Request an LTSSM change to ACTIVE regardless of the current state.
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _NEWSTATE,      _ACTIVE, data);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _OLDSTATE_MASK, _DONTCARE, data);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _ACTION,        _LTSSM_CHANGE, data);
    NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE, data);

    i = 0;

    // Poll until LINK_CHANGE[1:0] != 2b01.
    // At most 5 polls, 5us apart; _BUSY keeps polling, _FAULT or done exits.
    while (i < 5)
    {
        data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE);

        if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_CHANGE, _STATUS, _BUSY, data))
        {
            NVSWITCH_PRINT(device, INFO,
                "%s : Waiting for link %d to go to ACTIVE\n",
                __FUNCTION__, linkId);
        }
        else if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_CHANGE, _STATUS, _FAULT, data))
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s : Fault while changing LINK to ACTIVE. Link = %d\n",
                __FUNCTION__, linkId);
            break;
        }
        else
        {
            break;
        }

        NVSWITCH_NSEC_DELAY(5 * NVSWITCH_INTERVAL_1USEC_IN_NS);
        i++;
    }

    // Verify the final link state and finish bring-up if ACTIVE.
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_STATE);

    if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_STATE, _STATE, _ACTIVE, data))
    {
        NVSWITCH_PRINT(device, INFO,
            "%s : Link %d is in ACTIVE state, setting BUFFER_READY\n",
            __FUNCTION__, linkId);

        // Set buffer ready only for nvlink TLC and not NPORT
        nvswitch_init_buffer_ready(device, link, NV_FALSE);
    }
    else
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s : Timeout while waiting for link %d to go to ACTIVE\n",
            __FUNCTION__, linkId);
        NVSWITCH_PRINT(device, ERROR,
            "%s : Link %d is in 0x%x state\n",
            __FUNCTION__, linkId,DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, data));
    }

}
222 
//
// Forced-config link bring-up, driven by the chiplib_forced_config_link_mask
// regkeys: de-assert the link resets, then train each selected valid link
// to ACTIVE.  No-op when both mask regkeys are zero.
//
void
_nvswitch_setup_chiplib_forced_config_lr10
(
    nvswitch_device *device
)
{
    // Combine the two 32-bit regkeys into a single 64-bit link mask.
    NvU64 links = ((NvU64)device->regkeys.chiplib_forced_config_link_mask) +
                  ((NvU64)device->regkeys.chiplib_forced_config_link_mask2 << 32);
    NvU32 i;

    if (links == 0)
    {
        return;
    }

    //
    // First, take the links out of reset
    //
    // NOTE: On LR10, MINION will take the links out of reset during INITPHASE1
    // On platforms where MINION is not present and/or we want to run with forced
    // config, the driver must de-assert the link reset
    //
    _nvswitch_deassert_link_resets_lr10(device);

    // Next, train the links to ACTIVE/NVHS
    FOR_EACH_INDEX_IN_MASK(64, i, links)
    {
        if (device->link[i].valid)
        {
            _nvswitch_train_forced_config_link_lr10(device, i);
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;
}
257 
258 /*!
259  * @brief Parse packed little endian data and unpack into padded structure
260  *
261  * @param[in]   format          Data format
262  * @param[in]   packedData      Packed little endian data
263  * @param[out]  unpackedData    Unpacked padded structure
264  * @param[out]  unpackedSize    Unpacked data size
265  * @param[out]  fieldsCount     Number of fields
266  *
 * @return NV_OK on success; NV_ERR_GENERIC on an unrecognized format character
268  */
269 NV_STATUS
270 _nvswitch_devinit_unpack_structure
271 (
272     const char *format,
273     const NvU8 *packedData,
274     NvU32      *unpackedData,
275     NvU32      *unpackedSize,
276     NvU32      *fieldsCount
277 )
278 {
279     NvU32 unpkdSize = 0;
280     NvU32 fields = 0;
281     NvU32 count;
282     NvU32 data;
283     char fmt;
284 
285     while ((fmt = *format++))
286     {
287         count = 0;
288         while ((fmt >= '0') && (fmt <= '9'))
289         {
290             count *= 10;
291             count += fmt - '0';
292             fmt = *format++;
293         }
294         if (count == 0)
295             count = 1;
296 
297         while (count--)
298         {
299             switch (fmt)
300             {
301                 case 'b':
302                     data = *packedData++;
303                     unpkdSize += 1;
304                     break;
305 
306                 case 's':    // signed byte
307                     data = *packedData++;
308                     if (data & 0x80)
309                         data |= ~0xff;
310                     unpkdSize += 1;
311                     break;
312 
313                 case 'w':
314                     data  = *packedData++;
315                     data |= *packedData++ << 8;
316                     unpkdSize += 2;
317                     break;
318 
319                 case 'd':
320                     data  = *packedData++;
321                     data |= *packedData++ << 8;
322                     data |= *packedData++ << 16;
323                     data |= *packedData++ << 24;
324                     unpkdSize += 4;
325                     break;
326 
327                 default:
328                     return NV_ERR_GENERIC;
329             }
330             *unpackedData++ = data;
331             fields++;
332         }
333     }
334 
335     if (unpackedSize != NULL)
336         *unpackedSize = unpkdSize;
337 
338     if (fieldsCount != NULL)
339         *fieldsCount = fields;
340 
341     return NV_OK;
342 }
343 
344 /*!
345  * @brief Calculate packed and unpacked data size based on given data format
346  *
347  * @param[in]   format          Data format
348  * @param[out]  packedSize      Packed data size
349  * @param[out]  unpackedSize    Unpacked data size
350  *
351  */
352 void
353 _nvswitch_devinit_calculate_sizes
354 (
355     const char *format,
356     NvU32      *packedSize,
357     NvU32      *unpackedSize
358 )
359 {
360     NvU32 unpkdSize = 0;
361     NvU32 pkdSize = 0;
362     NvU32 count;
363     char fmt;
364 
365     while ((fmt = *format++))
366     {
367         count = 0;
368         while ((fmt >= '0') && (fmt <= '9'))
369         {
370             count *= 10;
371             count += fmt - '0';
372             fmt = *format++;
373         }
374         if (count == 0)
375             count = 1;
376 
377         switch (fmt)
378         {
379             case 'b':
380                 pkdSize += count * 1;
381                 unpkdSize += count * sizeof(bios_U008);
382                 break;
383 
384             case 's':    // signed byte
385                 pkdSize += count * 1;
386                 unpkdSize += count * sizeof(bios_S008);
387                 break;
388 
389             case 'w':
390                 pkdSize += count * 2;
391                 unpkdSize += count * sizeof(bios_U016);
392                 break;
393 
394             case 'd':
395                 pkdSize += count * 4;
396                 unpkdSize += count * sizeof(bios_U032);
397                 break;
398         }
399     }
400 
401     if (packedSize != NULL)
402         *packedSize = pkdSize;
403 
404     if (unpackedSize != NULL)
405         *unpackedSize = unpkdSize;
406 }
407 
408 /*!
409  * @brief Calculate packed and unpacked data size based on given data format
410  *
411  * @param[in]   format          Data format
412  * @param[out]  packedSize      Packed data size
413  * @param[out]  unpackedSize    Unpacked data size
414  *
415  */
416 
417 NV_STATUS
418 _nvswitch_vbios_read_structure
419 (
420     nvswitch_device *device,
421     void            *structure,
422     NvU32           offset,
423     NvU32           *ppacked_size,
424     const char      *format
425 )
426 {
427     NvU32  packed_size;
428     NvU8  *packed_data;
429     NvU32  unpacked_bytes;
430 
431     // calculate the size of the data as indicated by its packed format.
432     _nvswitch_devinit_calculate_sizes(format, &packed_size, &unpacked_bytes);
433 
434     if (ppacked_size)
435         *ppacked_size = packed_size;
436 
437     //
438     // is 'offset' too big?
439     // happens when we read bad ptrs from fixed addrs in image frequently
440     //
441     if ((offset + packed_size) > device->biosImage.size)
442     {
443         NVSWITCH_PRINT(device, ERROR, "%s: Bad offset in bios read: 0x%x, max is 0x%x, fmt is '%s'\n",
444                        __FUNCTION__, offset, device->biosImage.size, format);
445         return NV_ERR_GENERIC;
446     }
447 
448     packed_data = &device->biosImage.pImage[offset];
449     return _nvswitch_devinit_unpack_structure(format, packed_data, structure,
450                                               &unpacked_bytes, NULL);
451 }
452 
453 
454 NvlStatus
455 nvswitch_vbios_read_structure_lr10
456 (
457     nvswitch_device *device,
458     void            *structure,
459     NvU32           offset,
460     NvU32           *ppacked_size,
461     const char      *format
462 )
463 {
464     if (NV_OK == _nvswitch_vbios_read_structure(device, structure, offset, ppacked_size, format))
465     {
466        return NVL_SUCCESS;
467     }
468     else
469     {
470        return -NVL_ERR_GENERIC;
471     }
472 }
473 
474 NvU8
475 _nvswitch_vbios_read8
476 (
477     nvswitch_device *device,
478     NvU32           offset
479 )
480 {
481     bios_U008 data;     // BiosReadStructure expects 'bios' types
482 
483     _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "b");
484 
485     return (NvU8) data;
486 }
487 
488 NvU16
489 _nvswitch_vbios_read16
490 (
491     nvswitch_device *device,
492     NvU32           offset
493 )
494 {
495     bios_U016 data;     // BiosReadStructure expects 'bios' types
496 
497     _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "w");
498 
499     return (NvU16) data;
500 }
501 
502 
503 NvU32
504 _nvswitch_vbios_read32
505 (
506     nvswitch_device *device,
507     NvU32           offset
508 )
509 {
510     bios_U032 data;     // BiosReadStructure expects 'bios' types
511 
512     _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "d");
513 
514     return (NvU32) data;
515 }
516 
/*!
 * @brief Walk the BIT token table and record the NVLink config table address
 *
 * Reads the BIT header at bios_config->bit_address, iterates its tokens, and
 * for BIT_TOKEN_NVINIT_PTRS stores the NVLink config table pointer (converted
 * to an absolute image offset) into bios_config.
 *
 * @param[in]     device       nvswitch device
 * @param[in,out] bios_config  bit_address and pci_image_address must be set;
 *                             nvlink_config_table_address is updated
 *
 * @return NV_OK on success; an error status if any table read fails
 */
NV_STATUS
_nvswitch_perform_BIT_offset_update
(
    nvswitch_device *device,
    NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
)
{
    BIT_HEADER_V1_00         bitHeader;
    BIT_TOKEN_V1_00          bitToken;
    NV_STATUS                rmStatus;
    NvU32                    dataPointerOffset;
    NvU32 i;

    rmStatus = _nvswitch_vbios_read_structure(device,
                                              (NvU8*) &bitHeader,
                                              bios_config->bit_address,
                                              (NvU32 *) 0,
                                              BIT_HEADER_V1_00_FMT);

    if(rmStatus != NV_OK)
    {
        NVSWITCH_PRINT(device, ERROR,
                       "%s: Failed to read BIT table structure!.\n",
                       __FUNCTION__);
        return rmStatus;
    }

    // Tokens are packed back-to-back immediately after the header.
    for(i=0; i < bitHeader.TokenEntries; i++)
    {
        NvU32 BitTokenLocation = bios_config->bit_address + bitHeader.HeaderSize + (i * bitHeader.TokenSize);
        rmStatus = _nvswitch_vbios_read_structure(device,
                                                 (NvU8*) &bitToken,
                                                 BitTokenLocation,
                                                 (NvU32 *) 0,
                                                 BIT_TOKEN_V1_00_FMT);
        if(rmStatus != NV_OK)
        {
            NVSWITCH_PRINT(device, WARN,
                "%s: Failed to read BIT token %d!\n",
                __FUNCTION__, i);
            return NV_ERR_GENERIC;
        }

        // Token data pointers are relative to the PCI image start.
        dataPointerOffset = (bios_config->pci_image_address + bitToken.DataPtr);
        switch(bitToken.TokenId)
        {
            case BIT_TOKEN_NVINIT_PTRS:
            {
                BIT_DATA_NVINIT_PTRS_V1 nvInitTablePtrs;
                rmStatus = _nvswitch_vbios_read_structure(device,
                                                          (NvU8*) &nvInitTablePtrs,
                                                          dataPointerOffset,
                                                          (NvU32 *) 0,
                                                          BIT_DATA_NVINIT_PTRS_V1_30_FMT);
                if (rmStatus != NV_OK)
                {
                    NVSWITCH_PRINT(device, WARN,
                                   "%s: Failed to read internal data structure\n",
                                   __FUNCTION__);
                    return NV_ERR_GENERIC;
                }
                // Record the NVLink config table pointer as an absolute image offset
                bios_config->nvlink_config_table_address = (nvInitTablePtrs.NvlinkConfigDataPtr + bios_config->pci_image_address);
            }
            break;
        }
    }

    return NV_OK;
}
587 
588 NV_STATUS
589 _nvswitch_validate_BIT_header
590 (
591     nvswitch_device *device,
592     NvU32            bit_address
593 )
594 {
595     NvU32    headerSize = 0;
596     NvU32    chkSum = 0;
597     NvU32    i;
598 
599     //
600     // For now let's assume the Header Size is always at the same place.
601     // We can create something more complex if needed later.
602     //
603     headerSize = (NvU32)_nvswitch_vbios_read8(device, bit_address + BIT_HEADER_SIZE_OFFSET);
604 
605     // Now perform checksum
606     for (i = 0; i < headerSize; i++)
607         chkSum += (NvU32)_nvswitch_vbios_read8(device, bit_address + i);
608 
609     //Byte checksum removes upper bytes
610     chkSum = chkSum & 0xFF;
611 
612     if (chkSum)
613         return NV_ERR_GENERIC;
614 
615     return NV_OK;
616 }
617 
618 
619 NV_STATUS
620 nvswitch_verify_header
621 (
622     nvswitch_device *device,
623     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
624 )
625 {
626     NvU32       i;
627     NV_STATUS   status = NV_ERR_GENERIC;
628 
629     if ((bios_config == NULL) || (!bios_config->pci_image_address))
630     {
631         NVSWITCH_PRINT(device, ERROR,
632             "%s: PCI Image offset is not identified\n",
633             __FUNCTION__);
634         return status;
635     }
636 
637     // attempt to find the init info in the BIOS
638     for (i = bios_config->pci_image_address; i < device->biosImage.size - 3; i++)
639     {
640         NvU16 bitheaderID = _nvswitch_vbios_read16(device, i);
641         if (bitheaderID == BIT_HEADER_ID)
642         {
643             NvU32 signature = _nvswitch_vbios_read32(device, i + 2);
644             if (signature == BIT_HEADER_SIGNATURE)
645             {
646                 bios_config->bit_address = i;
647 
648                 // Checksum BIT to prove accuracy
649                 if (NV_OK != _nvswitch_validate_BIT_header(device, bios_config->bit_address))
650                 {
651                     device->biosImage.pImage = 0;
652                     device->biosImage.size = 0;
653                 }
654             }
655         }
656         // only if we find the bit address do we break
657         if (bios_config->bit_address)
658             break;
659     }
660     if (bios_config->bit_address)
661     {
662         status = NV_OK;
663     }
664 
665     return status;
666 }
667 
668 NV_STATUS
669 _nvswitch_vbios_update_bit_Offset
670 (
671     nvswitch_device *device,
672     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
673 )
674 {
675     NV_STATUS   status = NV_OK;
676 
677     if (bios_config->bit_address)
678     {
679         goto vbios_update_bit_Offset_done;
680     }
681 
682     status = nvswitch_verify_header(device, bios_config);
683     if (status != NV_OK)
684     {
685         NVSWITCH_PRINT(device, ERROR, "%s: *** BIT header is not found in vbios!\n",
686             __FUNCTION__);
687         goto vbios_update_bit_Offset_done;
688     }
689 
690     if (bios_config->bit_address)
691     {
692 
693         status = _nvswitch_perform_BIT_offset_update(device, bios_config);
694         if (status != NV_OK)
695             goto vbios_update_bit_Offset_done;
696     }
697 
698 vbios_update_bit_Offset_done:
699     return status;
700 }
701 
702 
703 NV_STATUS
704 _nvswitch_vbios_identify_pci_image_loc
705 (
706     nvswitch_device         *device,
707     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
708 )
709 {
710     NV_STATUS   status = NV_OK;
711     NvU32       i;
712 
713     if (bios_config->pci_image_address)
714     {
715         goto vbios_identify_pci_image_loc_done;
716     }
717 
718     // Match the PCI_EXP_ROM_SIGNATURE and followed by the PCI Data structure
719     // with PCIR and matching vendor ID
720     NVSWITCH_PRINT(device, SETUP,
721         "%s: Verifying and extracting PCI Data.\n",
722         __FUNCTION__);
723 
724     // attempt to find the init info in the BIOS
725     for (i = 0; i < (device->biosImage.size - PCI_ROM_HEADER_PCI_DATA_SIZE); i++)
726     {
727         NvU16 pci_rom_sigature = _nvswitch_vbios_read16(device, i);
728 
729         if (pci_rom_sigature == PCI_EXP_ROM_SIGNATURE)
730         {
731             NvU32 pcir_data_dffSet  = _nvswitch_vbios_read16(device, i + PCI_ROM_HEADER_SIZE);  // 0x16 -> 0x18 i.e, including the ROM Signature bytes
732 
733             if (((i + pcir_data_dffSet) + PCI_DATA_STRUCT_SIZE) < device->biosImage.size)
734             {
735                 NvU32 pcirSigature = _nvswitch_vbios_read32(device, (i + pcir_data_dffSet));
736 
737                 if (pcirSigature == PCI_DATA_STRUCT_SIGNATURE)
738                 {
739                     PCI_DATA_STRUCT pciData;
740                     status = _nvswitch_vbios_read_structure(device,
741                                                            (NvU8*) &pciData,
742                                                             i + pcir_data_dffSet,
743                                                             (NvU32 *) 0,
744                                                             PCI_DATA_STRUCT_FMT);
745                     if (status != NV_OK)
746                     {
747                         NVSWITCH_PRINT(device, WARN,
748                                        "%s: Failed to PCI Data for validation\n",
749                                        __FUNCTION__);
750                         goto vbios_identify_pci_image_loc_done;
751                     }
752 
753                     // Validate the vendor details as well
754                     if (pciData.vendorID == PCI_VENDOR_ID_NVIDIA)
755                     {
756                         bios_config->pci_image_address = i;
757                         break;
758                     }
759                 }
760             }
761         }
762     }
763 
764 vbios_identify_pci_image_loc_done:
765     return status;
766 }
767 
//
// Return the NVLink config table address stashed in bios_config by
// _nvswitch_perform_BIT_offset_update(); callers treat 0 as "not set".
//
NvU32 _nvswitch_get_nvlink_config_address
(
    nvswitch_device         *device,
    NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
)
{
    return bios_config->nvlink_config_table_address;
}
776 
777 NV_STATUS
778 _nvswitch_read_vbios_link_base_entry
779 (
780     nvswitch_device *device,
781     NvU32            tblPtr,
782     NVLINK_CONFIG_DATA_BASEENTRY  *link_base_entry
783 )
784 {
785     NV_STATUS status = NV_ERR_INVALID_PARAMETER;
786     NVLINK_VBIOS_CONFIG_DATA_BASEENTRY vbios_link_base_entry;
787 
788     status = _nvswitch_vbios_read_structure(device, &vbios_link_base_entry, tblPtr, (NvU32 *)0, NVLINK_CONFIG_DATA_BASEENTRY_FMT);
789     if (status != NV_OK)
790     {
791         NVSWITCH_PRINT(device, ERROR,
792             "%s: Error on reading nvlink base entry\n",
793             __FUNCTION__);
794         return status;
795     }
796 
797     link_base_entry->positionId = vbios_link_base_entry.positionId;
798 
799     return status;
800 }
801 
/*!
 * @brief Read per-link config entries (v2.0 layout) from the VBIOS NVLink
 *        config table into 'link_entries'
 *
 * @param[in]   device                        nvswitch device
 * @param[in]   tblPtr                        Image offset of the first link entry
 * @param[in]   expected_link_entriesCount    Number of entries to read; caller
 *                                            must size 'link_entries' accordingly
 * @param[out]  link_entries                  Destination array
 * @param[out]  identified_link_entriesCount  Number of entries actually read
 *
 * @return Status of the last VBIOS read.  Note: when
 *         expected_link_entriesCount is 0 this returns NV_ERR_INVALID_PARAMETER
 *         unchanged, since the loop never runs.
 */
NvlStatus
nvswitch_read_vbios_link_entries_lr10
(
    nvswitch_device              *device,
    NvU32                         tblPtr,
    NvU32                         expected_link_entriesCount,
    NVLINK_CONFIG_DATA_LINKENTRY *link_entries,
    NvU32                        *identified_link_entriesCount
)
{
    NV_STATUS status = NV_ERR_INVALID_PARAMETER;
    NvU32 i;
    NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_20 vbios_link_entry;
    *identified_link_entriesCount = 0;

    for (i = 0; i < expected_link_entriesCount; i++)
    {
        status = _nvswitch_vbios_read_structure(device,
                                                &vbios_link_entry,
                                                tblPtr, (NvU32 *)0,
                                                NVLINK_CONFIG_DATA_LINKENTRY_FMT_20);
        if (status != NV_OK)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Error on reading nvlink entry\n",
                __FUNCTION__);
            return status;
        }
        link_entries[i].nvLinkparam0 = (NvU8)vbios_link_entry.nvLinkparam0;
        link_entries[i].nvLinkparam1 = (NvU8)vbios_link_entry.nvLinkparam1;
        link_entries[i].nvLinkparam2 = (NvU8)vbios_link_entry.nvLinkparam2;
        link_entries[i].nvLinkparam3 = (NvU8)vbios_link_entry.nvLinkparam3;
        link_entries[i].nvLinkparam4 = (NvU8)vbios_link_entry.nvLinkparam4;
        link_entries[i].nvLinkparam5 = (NvU8)vbios_link_entry.nvLinkparam5;
        link_entries[i].nvLinkparam6 = (NvU8)vbios_link_entry.nvLinkparam6;
        // NOTE(review): advances by sizeof(entry)/sizeof(NvU32) -- this assumes
        // the packed VBIOS entry stride equals the unpacked struct's NvU32
        // field count; confirm against NVLINK_CONFIG_DATA_LINKENTRY_FMT_20.
        tblPtr += (sizeof(NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_20)/sizeof(NvU32));

        NVSWITCH_PRINT(device, SETUP,
            "<<<---- NvLink ID 0x%x ---->>>\n", i);
        NVSWITCH_PRINT(device, SETUP,
            "NVLink Params 0 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam0, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam0));
        NVSWITCH_PRINT(device, SETUP,
            "NVLink Params 1 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam1, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam1));
        NVSWITCH_PRINT(device, SETUP,
            "NVLink Params 2 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam2, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam2));
        NVSWITCH_PRINT(device, SETUP,
            "NVLink Params 3 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam3, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam3));
        NVSWITCH_PRINT(device, SETUP,
            "NVLink Params 4 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam4, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam4));
        NVSWITCH_PRINT(device, SETUP,
            "NVLink Params 5 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam5, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam5));
        NVSWITCH_PRINT(device, SETUP,
            "NVLink Params 6 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam6, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam6));
        NVSWITCH_PRINT(device, SETUP,
            "<<<---- NvLink ID 0x%x ---->>>\n\n", i);
    }
    *identified_link_entriesCount = i;
    return status;
}
861 
//
// Locate and parse the VBIOS NvLink configuration table: validate the header,
// then read every base entry and its associated link entries into bios_config.
// Returns NV_OK on success; NV_ERR_GENERIC if the table is absent, has an
// unknown version, or a read fails.
//
NV_STATUS
_nvswitch_vbios_fetch_nvlink_entries
(
    nvswitch_device         *device,
    NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
)
{
    NvU32                       tblPtr;
    NvU8                        version;
    NvU8                        size;
    NV_STATUS                   status = NV_ERR_GENERIC;
    NVLINK_CONFIG_DATA_HEADER   header;
    NvU32                       base_entry_index;
    NvU32                       expected_base_entry_count;

    tblPtr = _nvswitch_get_nvlink_config_address(device, bios_config);
    if (!tblPtr)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: No NvLink Config table set\n",
            __FUNCTION__);
        goto vbios_fetch_nvlink_entries_done;
    }

    // Read the table version number
    version = _nvswitch_vbios_read8(device, tblPtr);
    switch (version)
    {
        // Versions 2.0 and 3.0 share the same header layout; only the link
        // entry stride differs (handled in the loop below).
        case NVLINK_CONFIG_DATA_HEADER_VER_20:
        case NVLINK_CONFIG_DATA_HEADER_VER_30:
            size = _nvswitch_vbios_read8(device, tblPtr + 1);
            if (size == NVLINK_CONFIG_DATA_HEADER_20_SIZE)
            {
                // Grab Nvlink Config Data Header
                status = _nvswitch_vbios_read_structure(device, &header.ver_20, tblPtr, (NvU32 *) 0, NVLINK_CONFIG_DATA_HEADER_20_FMT);

                if (status != NV_OK)
                {
                    NVSWITCH_PRINT(device, ERROR,
                        "%s: Error on reading the nvlink config header\n",
                        __FUNCTION__);
                }
            }
            break;
        default:
            NVSWITCH_PRINT(device, ERROR,
                "%s: Invalid version 0x%x\n",
                __FUNCTION__, version);
    }
    // status is still NV_ERR_GENERIC here if the version was unknown or the
    // header size did not match; bail out in either case.
    if (status != NV_OK)
    {
        goto vbios_fetch_nvlink_entries_done;
    }

    NVSWITCH_PRINT(device, SETUP,
        "<<<---- NvLink Header ---->>>\n\n");
    NVSWITCH_PRINT(device, SETUP,
        "Version \t\t 0x%x\n", header.ver_20.Version);
    NVSWITCH_PRINT(device, SETUP,
        "Header Size \t0x%x\n", header.ver_20.HeaderSize);
    NVSWITCH_PRINT(device, SETUP,
        "Base Entry Size \t0x%x\n", header.ver_20.BaseEntrySize);
    NVSWITCH_PRINT(device, SETUP,
        "Base Entry count \t0x%x\n", header.ver_20.BaseEntryCount);
    NVSWITCH_PRINT(device, SETUP,
        "Link Entry Size \t0x%x\n", header.ver_20.LinkEntrySize);
    NVSWITCH_PRINT(device, SETUP,
        "Link Entry Count \t0x%x\n", header.ver_20.LinkEntryCount);
    NVSWITCH_PRINT(device, SETUP,
        "Reserved \t0x%x\n", header.ver_20.Reserved);
    NVSWITCH_PRINT(device, SETUP,
        "<<<---- NvLink Header ---->>>\n");
    // Remember the table generation on the device; it selects the link entry
    // stride below and the parse format in the per-HAL link entry reader.
    if (header.ver_20.Version == NVLINK_CONFIG_DATA_HEADER_VER_20)
    {
         device->bIsNvlinkVbiosTableVersion2 = NV_TRUE;
    }
    // Clamp counts from the (untrusted) VBIOS to the driver's fixed storage.
    expected_base_entry_count = header.ver_20.BaseEntryCount;
    if (expected_base_entry_count > NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY)
    {
        NVSWITCH_PRINT(device, WARN,
            "%s: Greater than expected base entry count 0x%x - Restricting to count 0x%x\n",
            __FUNCTION__, expected_base_entry_count, NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY);
        expected_base_entry_count = NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY;
    }

    tblPtr += header.ver_20.HeaderSize;
    for (base_entry_index = 0; base_entry_index < expected_base_entry_count; base_entry_index++)
    {
        NvU32 expected_link_entriesCount = header.ver_20.LinkEntryCount;
        if (expected_link_entriesCount > NVSWITCH_LINK_COUNT(device))
        {
            NVSWITCH_PRINT(device, WARN,
                "%s: Greater than expected link count 0x%x - Restricting to count 0x%x\n",
                __FUNCTION__, expected_link_entriesCount, NVSWITCH_LINK_COUNT(device));
            expected_link_entriesCount = NVSWITCH_LINK_COUNT(device);
        }

        // Grab Nvlink Config Data Base Entry
        _nvswitch_read_vbios_link_base_entry(device, tblPtr, &bios_config->link_vbios_base_entry[base_entry_index]);
        tblPtr += header.ver_20.BaseEntrySize;
        device->hal.nvswitch_read_vbios_link_entries(device,
                                          tblPtr,
                                          expected_link_entriesCount,
                                          bios_config->link_vbios_entry[base_entry_index],
                                          &bios_config->identified_Link_entries[base_entry_index]);

        //
        // Advance past this base entry's link entries. The stride is divided
        // by sizeof(NvU32), which implies tblPtr counts 4-byte words here —
        // NOTE(review): confirm against _nvswitch_vbios_read_structure's
        // addressing, since HeaderSize/BaseEntrySize above are added raw.
        //
        if (device->bIsNvlinkVbiosTableVersion2)
        {
            tblPtr += (expected_link_entriesCount * (sizeof(NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_20)/sizeof(NvU32)));
        }
        else
        {
            tblPtr += (expected_link_entriesCount * (sizeof(NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_30)/sizeof(NvU32)));
        }
    }
vbios_fetch_nvlink_entries_done:
    return status;
}
980 
981 NV_STATUS
982 _nvswitch_vbios_assign_base_entry
983 (
984     nvswitch_device         *device,
985     NVSWITCH_BIOS_NVLINK_CONFIG    *bios_config
986 )
987 {
988     NvU32 physical_id;
989     NvU32 entry_index;
990 
991     physical_id = nvswitch_read_physical_id(device);
992 
993     for (entry_index = 0; entry_index < NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY; entry_index++)
994     {
995         if (physical_id == bios_config->link_vbios_base_entry[entry_index].positionId)
996         {
997             bios_config->link_base_entry_assigned = entry_index;
998             return NV_OK;
999         }
1000     }
1001 
1002     // TODO: Bug 3507948
1003     NVSWITCH_PRINT(device, ERROR,
1004             "%s: Error on assigning base entry. Setting base entry index = 0\n",
1005             __FUNCTION__);
1006     bios_config->link_base_entry_assigned = 0;
1007 
1008     return NV_OK;
1009 }
1010 
1011 NV_STATUS
1012 _nvswitch_setup_link_vbios_overrides
1013 (
1014     nvswitch_device *device,
1015     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
1016 )
1017 {
1018     NV_STATUS    status         = NV_OK;
1019 
1020     if (bios_config == NULL)
1021     {
1022         NVSWITCH_PRINT(device, ERROR,
1023                 "%s: BIOS config override not supported\n",
1024                 __FUNCTION__);
1025          return -NVL_ERR_NOT_SUPPORTED;
1026     }
1027 
1028     bios_config->vbios_disabled_link_mask = 0;
1029 
1030     bios_config->bit_address                 = 0;
1031     bios_config->pci_image_address           = 0;
1032     bios_config->nvlink_config_table_address = 0;
1033 
1034     if ((device->biosImage.size == 0) || (device->biosImage.pImage == NULL))
1035     {
1036         NVSWITCH_PRINT(device, ERROR,
1037                 "%s: VBIOS not exist size:0x%x\n",
1038                 __FUNCTION__, device->biosImage.size);
1039          return -NVL_ERR_NOT_SUPPORTED;
1040     }
1041 
1042     //
1043     // Locate the PCI ROM Image
1044     //
1045     if (_nvswitch_vbios_identify_pci_image_loc(device, bios_config)  != NV_OK)
1046     {
1047         NVSWITCH_PRINT(device, ERROR,
1048             "%s: Error on identifying pci image loc\n",
1049             __FUNCTION__);
1050         status = NV_ERR_GENERIC;
1051         goto setup_link_vbios_overrides_done;
1052     }
1053 
1054     //
1055     // Locate and fetch BIT offset
1056     //
1057     if (_nvswitch_vbios_update_bit_Offset(device, bios_config) != NV_OK)
1058     {
1059         NVSWITCH_PRINT(device, ERROR,
1060             "%s: Error on identifying pci image loc\n",
1061             __FUNCTION__);
1062         status = NV_ERR_GENERIC;
1063         goto setup_link_vbios_overrides_done;
1064     }
1065 
1066     //
1067     // Fetch NvLink Entries
1068     //
1069     if (_nvswitch_vbios_fetch_nvlink_entries(device, bios_config) != NV_OK)
1070     {
1071         NVSWITCH_PRINT(device, ERROR,
1072             "%s: Error on fetching nvlink entries\n",
1073             __FUNCTION__);
1074         status = NV_ERR_GENERIC;
1075         goto setup_link_vbios_overrides_done;
1076     }
1077 
1078     //
1079     // Assign Base Entry for this device
1080     //
1081     if (_nvswitch_vbios_assign_base_entry(device, bios_config) != NV_OK)
1082     {
1083         NVSWITCH_PRINT(device, ERROR,
1084             "%s: Error on assigning base entry\n",
1085             __FUNCTION__);
1086         status = NV_ERR_GENERIC;
1087         goto setup_link_vbios_overrides_done;
1088     }
1089 
1090 setup_link_vbios_overrides_done:
1091     if (status != NV_OK)
1092     {
1093         bios_config->bit_address                = 0;
1094         bios_config->pci_image_address          = 0;
1095         bios_config->nvlink_config_table_address =0;
1096     }
1097     return status;
1098 }
1099 
1100 /*
1101  * @Brief : Setting up system registers after device initialization
1102  *
1103  * @Description :
1104  *
1105  * @param[in] device        a reference to the device to initialize
1106  */
1107 NvlStatus
1108 nvswitch_setup_system_registers_lr10
1109 (
1110     nvswitch_device *device
1111 )
1112 {
1113     nvlink_link *link;
1114     NvU8 i;
1115     NvU64 enabledLinkMask;
1116 
1117     enabledLinkMask = nvswitch_get_enabled_link_mask(device);
1118 
1119     FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask)
1120     {
1121         NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device));
1122 
1123         link = nvswitch_get_link(device, i);
1124 
1125         if ((link == NULL) ||
1126             !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
1127             (i >= NVSWITCH_NVLINK_MAX_LINKS))
1128         {
1129             continue;
1130         }
1131 
1132         nvswitch_setup_link_system_registers(device, link);
1133         nvswitch_load_link_disable_settings(device, link);
1134     }
1135     FOR_EACH_INDEX_IN_MASK_END;
1136 
1137     return NVL_SUCCESS;
1138 }
1139 
1140 NvlStatus
1141 nvswitch_deassert_link_reset_lr10
1142 (
1143     nvswitch_device *device,
1144     nvlink_link     *link
1145 )
1146 {
1147     NvU64 mode;
1148     NvlStatus status = NVL_SUCCESS;
1149 
1150     status = device->hal.nvswitch_corelib_get_dl_link_mode(link, &mode);
1151 
1152     if (status != NVL_SUCCESS)
1153     {
1154         NVSWITCH_PRINT(device, ERROR,
1155                 "%s:DL link mode failed on link %d\n",
1156                 __FUNCTION__, link->linkNumber);
1157         return status;
1158     }
1159 
1160     // Check if the link is RESET
1161     if (mode != NVLINK_LINKSTATE_RESET)
1162     {
1163         return NVL_SUCCESS;
1164     }
1165 
1166     // Send INITPHASE1 to bring link out of reset
1167     status = link->link_handlers->set_dl_link_mode(link,
1168                                         NVLINK_LINKSTATE_INITPHASE1,
1169                                         NVLINK_STATE_CHANGE_ASYNC);
1170 
1171     if (status != NVL_SUCCESS)
1172     {
1173         NVSWITCH_PRINT(device, ERROR,
1174                 "%s: INITPHASE1 failed on link %d\n",
1175                 __FUNCTION__, link->linkNumber);
1176     }
1177 
1178     return status;
1179 }
1180 
// Number of virtual channels on LR10 (fixed per-chip constant).
static NvU32
_nvswitch_get_num_vcs_lr10
(
    nvswitch_device *device
)
{
    return NVSWITCH_NUM_VCS_LR10;
}
1189 
1190 void
1191 nvswitch_determine_platform_lr10
1192 (
1193     nvswitch_device *device
1194 )
1195 {
1196     NvU32 value;
1197 
1198     //
1199     // Determine which model we are using SMC_BOOT_2 and OS query
1200     //
1201     value = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_2);
1202     device->is_emulation = FLD_TEST_DRF(_PSMC, _BOOT_2, _EMULATION, _YES, value);
1203 
1204     if (!IS_EMULATION(device))
1205     {
1206         // If we are not on fmodel, we must be on RTL sim or silicon
1207         if (FLD_TEST_DRF(_PSMC, _BOOT_2, _FMODEL, _YES, value))
1208         {
1209             device->is_fmodel = NV_TRUE;
1210         }
1211         else
1212         {
1213             device->is_rtlsim = NV_TRUE;
1214 
1215             // Let OS code finalize RTL sim vs silicon setting
1216             nvswitch_os_override_platform(device->os_handle, &device->is_rtlsim);
1217         }
1218     }
1219 
1220 #if defined(NVLINK_PRINT_ENABLED)
1221     {
1222         const char *build;
1223         const char *mode;
1224 
1225         build = "HW";
1226         if (IS_FMODEL(device))
1227             mode = "fmodel";
1228         else if (IS_RTLSIM(device))
1229             mode = "rtlsim";
1230         else if (IS_EMULATION(device))
1231             mode = "emulation";
1232         else
1233             mode = "silicon";
1234 
1235         NVSWITCH_PRINT(device, SETUP,
1236             "%s: build: %s platform: %s\n",
1237              __FUNCTION__, build, mode);
1238     }
1239 #endif // NVLINK_PRINT_ENABLED
1240 }
1241 
1242 static void
1243 _nvswitch_portstat_reset_latency_counters
1244 (
1245     nvswitch_device *device
1246 )
1247 {
1248     // Set SNAPONDEMAND from 0->1 to reset the counters
1249     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL,
1250         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
1251         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _ENABLE));
1252 
1253     // Set SNAPONDEMAND back to 0.
1254     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL,
1255         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
1256         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE));
1257 }
1258 
1259 //
1260 // Data collector which runs on a background thread, collecting latency stats.
1261 //
1262 // The latency counters have a maximum window period of 3.299 seconds
1263 // (2^32 clk cycles). The counters reset after this period. So SW snaps
1264 // the bins and records latencies every 3 seconds. Setting SNAPONDEMAND from 0->1
1265 // snaps the  latency counters and updates them to PRI registers for
1266 // the SW to read. It then resets the counters to start collecting fresh latencies.
1267 //
1268 
1269 void
1270 nvswitch_internal_latency_bin_log_lr10
1271 (
1272     nvswitch_device *device
1273 )
1274 {
1275     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
1276     NvU32 idx_nport;
1277     NvU32 idx_vc;
1278     NvBool vc_valid;
1279     NvU32 latency;
1280     NvU64 time_nsec;
1281     NvU32 link_type;    // Access or trunk link
1282     NvU64 last_visited_time_nsec;
1283 
1284     if (chip_device->latency_stats == NULL)
1285     {
1286         // Latency stat buffers not allocated yet
1287         return;
1288     }
1289 
1290     time_nsec = nvswitch_os_get_platform_time();
1291     last_visited_time_nsec = chip_device->latency_stats->last_visited_time_nsec;
1292 
1293     // Update last visited time
1294     chip_device->latency_stats->last_visited_time_nsec = time_nsec;
1295 
1296     // Compare time stamp and reset the counters if the snap is missed
1297     if (!IS_RTLSIM(device) || !IS_FMODEL(device))
1298     {
1299         if ((last_visited_time_nsec != 0) &&
1300             ((time_nsec - last_visited_time_nsec) > 3 * NVSWITCH_INTERVAL_1SEC_IN_NS))
1301         {
1302             NVSWITCH_PRINT(device, ERROR,
1303                 "Latency metrics recording interval missed.  Resetting counters.\n");
1304             _nvswitch_portstat_reset_latency_counters(device);
1305             return;
1306         }
1307     }
1308 
1309     for (idx_nport=0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++)
1310     {
1311         if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, idx_nport))
1312         {
1313             continue;
1314         }
1315 
1316         // Setting SNAPONDEMAND from 0->1 snaps the latencies and resets the counters
1317         NVSWITCH_LINK_WR32_LR10(device, idx_nport, NPORT, _NPORT, _PORTSTAT_SNAP_CONTROL,
1318             DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
1319             DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _ENABLE));
1320 
1321         //
1322         // TODO: Check _STARTCOUNTER and don't log if counter not enabled.
1323         // Currently all counters are always enabled
1324         //
1325 
1326         link_type = NVSWITCH_LINK_RD32_LR10(device, idx_nport, NPORT, _NPORT, _CTRL);
1327         for (idx_vc = 0; idx_vc < NVSWITCH_NUM_VCS_LR10; idx_vc++)
1328         {
1329             vc_valid = NV_FALSE;
1330 
1331             // VC's CREQ0(0) and RSP0(5) are relevant on access links.
1332             if (FLD_TEST_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _ACCESSLINK, link_type) &&
1333                 ((idx_vc == NV_NPORT_VC_MAPPING_CREQ0) ||
1334                 (idx_vc == NV_NPORT_VC_MAPPING_RSP0)))
1335             {
1336                 vc_valid = NV_TRUE;
1337             }
1338 
1339             // VC's CREQ0(0), RSP0(5), CREQ1(6) and RSP1(7) are relevant on trunk links.
1340             if (FLD_TEST_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _TRUNKLINK, link_type) &&
1341                 ((idx_vc == NV_NPORT_VC_MAPPING_CREQ0)  ||
1342                  (idx_vc == NV_NPORT_VC_MAPPING_RSP0)   ||
1343                  (idx_vc == NV_NPORT_VC_MAPPING_CREQ1)  ||
1344                  (idx_vc == NV_NPORT_VC_MAPPING_RSP1)))
1345             {
1346                 vc_valid = NV_TRUE;
1347             }
1348 
1349             // If the VC is not being used, skip reading it
1350             if (!vc_valid)
1351             {
1352                 continue;
1353             }
1354 
1355             latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _LOW, idx_vc);
1356             chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].low += latency;
1357 
1358             latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _MEDIUM, idx_vc);
1359             chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].medium += latency;
1360 
1361             latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _HIGH, idx_vc);
1362             chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].high += latency;
1363 
1364             latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _PANIC, idx_vc);
1365             chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].panic += latency;
1366 
1367             latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _PACKET, _COUNT, idx_vc);
1368             chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].count += latency;
1369 
1370             // Note the time of this snap
1371             chip_device->latency_stats->latency[idx_vc].last_read_time_nsec = time_nsec;
1372             chip_device->latency_stats->latency[idx_vc].count++;
1373         }
1374 
1375         // Disable SNAPONDEMAND after fetching the latencies
1376         NVSWITCH_LINK_WR32_LR10(device, idx_nport, NPORT, _NPORT, _PORTSTAT_SNAP_CONTROL,
1377             DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
1378             DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE));
1379     }
1380 }
1381 
// Intentionally a no-op on LR10: this HAL hook does no ECC writeback here.
void
nvswitch_ecc_writeback_task_lr10
(
    nvswitch_device *device
)
{
}
1389 
1390 void
1391 nvswitch_set_ganged_link_table_lr10
1392 (
1393     nvswitch_device *device,
1394     NvU32            firstIndex,
1395     NvU64           *ganged_link_table,
1396     NvU32            numEntries
1397 )
1398 {
1399     NvU32 i;
1400 
1401     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _REG_TABLE_ADDRESS,
1402         DRF_NUM(_ROUTE, _REG_TABLE_ADDRESS, _INDEX, firstIndex) |
1403         DRF_NUM(_ROUTE, _REG_TABLE_ADDRESS, _AUTO_INCR, 1));
1404 
1405     for (i = 0; i < numEntries; i++)
1406     {
1407         NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _REG_TABLE_DATA0,
1408             NvU64_LO32(ganged_link_table[i]));
1409 
1410         NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _REG_TABLE_DATA0,
1411             NvU64_HI32(ganged_link_table[i]));
1412     }
1413 }
1414 
//
// Build and program a uniform-spray ganged link routing table, then cache
// the table in chip_device so it can be freed/reused later.
// Returns -NVL_NO_MEM if the table allocation fails.
//
static NvlStatus
_nvswitch_init_ganged_link_routing
(
    nvswitch_device *device
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32        gang_index, gang_size;
    NvU64        gang_entry;
    NvU32        block_index;
    NvU32        block_count = 16;
    NvU32        glt_entries = 16;    // 4-bit link fields per 64-bit entry
    NvU32        glt_size = ROUTE_GANG_TABLE_SIZE / 2;
    NvU64        *ganged_link_table = NULL;
    NvU32        block_size = ROUTE_GANG_TABLE_SIZE / block_count;
    NvU32        table_index = 0;
    NvU32        i;

    //
    // Refer to switch IAS 11.2 Figure 82. Limerock Ganged RAM Table Format
    //
    // The ganged link routing table is composed of 512 entries divided into
    // 16 sections. Each section specifies how requests should be routed
    // through the ganged links. Each 32-bit entry is composed of eight 4-bit
    // fields specifying the set of links to distribute through. More complex
    // spray patterns could be constructed, but for now initialize it with a
    // uniform distribution pattern.
    //
    // The ganged link routing table will be loaded with following values:
    // Typically the first section would be filled with (0,1,2,3,4,5,6,7), (8,9,10,11,12,13,14,15),...
    // Typically the second section would be filled with (0,0,0,0,0,0,0,0), (0,0,0,0,0,0,0,0),...
    // Typically the third section would be filled with (0,1,0,1,0,1,0,1), (0,1,0,1,0,1,0,1),...
    // Typically the fourth section would be filled with (0,1,2,0,1,2,0,1), (2,0,1,2,0,1,2,0),...
    //  :
    // The last section would typically be filled with (0,1,2,3,4,5,6,7), (8,9,10,11,12,13,14,0),...
    //
    // Refer table 20: Definition of size bits used with Ganged Link Number Table.
    // Note that section 0 corresponds with 16 ganged links.  Section N corresponds with
    // N ganged links.
    //

    // Alloc memory for the ganged link table (glt_size 64-bit entries)
    ganged_link_table = nvswitch_os_malloc(glt_size * sizeof(gang_entry));
    if (ganged_link_table == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Failed to allocate memory for GLT!!\n");
        return -NVL_NO_MEM;
    }

    for (block_index = 0; block_index < block_count; block_index++)
    {
        // Section 0 sprays across 16 links; section N sprays across N links.
        gang_size = ((block_index==0) ? 16 : block_index);

        for (gang_index = 0; gang_index < block_size/2; gang_index++)
        {
            gang_entry = 0;
            NVSWITCH_ASSERT(table_index < glt_size);

            // Pack 16 4-bit link numbers, cycling through the gang uniformly.
            for (i = 0; i < glt_entries; i++)
            {
                gang_entry |=
                    DRF_NUM64(_ROUTE, _REG_TABLE_DATA0, _GLX(i), (16 * gang_index + i) % gang_size);
            }

            ganged_link_table[table_index++] = gang_entry;
        }
    }

    nvswitch_set_ganged_link_table_lr10(device, 0, ganged_link_table, glt_size);

    // Ownership of the table transfers to chip_device (freed at teardown).
    chip_device->ganged_link_table = ganged_link_table;

    return NVL_SUCCESS;
}
1489 
//
// Take the IP wrapper modules (NXBAR, NVLIPT, NPG) out of reset via the SAW
// PMC enable registers, then reconcile the written enable masks against the
// readback — in a VM the hypervisor write-protects these registers, so the
// readback tells us which units this VM may use. Links belonging to units
// that end up disabled are marked invalid.
//
static NvlStatus
nvswitch_initialize_ip_wrappers_lr10
(
    nvswitch_device *device
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32 engine_enable_mask;
    NvU32 engine_disable_mask;
    NvU32 i, j;
    NvU32 idx_link;

    //
    // Now that software knows the devices and addresses, it must take all
    // the wrapper modules out of reset.  It does this by writing to the
    // PMC module enable registers.
    //

// Temporary - bug 2069764
//    NVSWITCH_REG_WR32(device, _PSMC, _ENABLE,
//        DRF_DEF(_PSMC, _ENABLE, _SAW, _ENABLE) |
//        DRF_DEF(_PSMC, _ENABLE, _PRIV_RING, _ENABLE) |
//        DRF_DEF(_PSMC, _ENABLE, _PERFMON, _ENABLE));

    NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE,
        DRF_DEF(_NVLSAW_NVSPMC, _ENABLE, _NXBAR, _ENABLE));

    //
    // At this point the list of discovered devices has been cross-referenced
    // with the ROM configuration, platform configuration, and regkey override.
    // The NVLIPT & NPORT enable filtering done here further updates the MMIO
    // information based on KVM.
    //

    // Enable the NVLIPT units that have been discovered
    engine_enable_mask = 0;
    for (i = 0; i < NVSWITCH_ENG_COUNT(device, NVLW, ); i++)
    {
        if (NVSWITCH_ENG_IS_VALID(device, NVLW, i))
        {
            engine_enable_mask |= NVBIT(i);
        }
    }
    NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NVLIPT, engine_enable_mask);

    //
    // In bare metal we write ENABLE_NVLIPT to enable the units that aren't
    // disabled by ROM configuration, platform configuration, or regkey override.
    // If we are running inside a VM, the hypervisor has already set ENABLE_NVLIPT
    // and write protected it.  Reading ENABLE_NVLIPT tells us which units we
    // are allowed to use inside this VM.
    //
    engine_disable_mask = ~NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NVLIPT);
    if (engine_enable_mask != ~engine_disable_mask)
    {
        NVSWITCH_PRINT(device, WARN,
            "NV_NVLSAW_NVSPMC_ENABLE_NVLIPT mismatch: wrote 0x%x, read 0x%x\n",
            engine_enable_mask,
            ~engine_disable_mask);
        NVSWITCH_PRINT(device, WARN,
            "Ignoring NV_NVLSAW_NVSPMC_ENABLE_NVLIPT readback until supported on fmodel\n");
        engine_disable_mask = ~engine_enable_mask;
    }
    // Restrict the disable mask to the NVLW engine range, then invalidate
    // every NVLW unit (and its links) that ended up disabled.
    engine_disable_mask &= NVBIT(NVSWITCH_ENG_COUNT(device, NVLW, )) - 1;
    FOR_EACH_INDEX_IN_MASK(32, i, engine_disable_mask)
    {
        chip_device->engNVLW[i].valid = NV_FALSE;
        for (j = 0; j < NVSWITCH_LINKS_PER_NVLW; j++)
        {
            idx_link = i * NVSWITCH_LINKS_PER_NVLW + j;
            if (idx_link < NVSWITCH_LINK_COUNT(device))
            {
                device->link[idx_link].valid = NV_FALSE;
                //
                // TODO: This invalidate used to also invalidate all the
                // associated NVLW engFOO units. This is probably not necessary
                // but code that bypasses the link valid check might touch the
                // underlying units when they are not supposed to.
                //
            }
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    // Enable the NPORT units that have been discovered
    engine_enable_mask = 0;
    for (i = 0; i < NVSWITCH_ENG_COUNT(device, NPG, ); i++)
    {
        if (NVSWITCH_ENG_IS_VALID(device, NPG, i))
        {
            engine_enable_mask |= NVBIT(i);
        }
    }
    NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NPG, engine_enable_mask);

    //
    // In bare metal we write ENABLE_NPG to enable the units that aren't
    // disabled by ROM configuration, platform configuration, or regkey override.
    // If we are running inside a VM, the hypervisor has already set ENABLE_NPG
    // and write protected it.  Reading ENABLE_NPG tells us which units we
    // are allowed to use inside this VM.
    //
    engine_disable_mask = ~NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NPG);
    if (engine_enable_mask != ~engine_disable_mask)
    {
        NVSWITCH_PRINT(device, WARN,
            "NV_NVLSAW_NVSPMC_ENABLE_NPG mismatch: wrote 0x%x, read 0x%x\n",
            engine_enable_mask,
            ~engine_disable_mask);
        NVSWITCH_PRINT(device, WARN,
            "Ignoring NV_NVLSAW_NVSPMC_ENABLE_NPG readback until supported on fmodel\n");
        engine_disable_mask = ~engine_enable_mask;
    }
    // Same invalidation pass for NPG units and their links.
    engine_disable_mask &= NVBIT(NVSWITCH_ENG_COUNT(device, NPG, )) - 1;
    FOR_EACH_INDEX_IN_MASK(32, i, engine_disable_mask)
    {
        chip_device->engNPG[i].valid = NV_FALSE;
        for (j = 0; j < NVSWITCH_LINKS_PER_NPG; j++)
        {
            idx_link = i * NVSWITCH_LINKS_PER_NPG + j;

            if (idx_link < NVSWITCH_LINK_COUNT(device))
            {
                device->link[idx_link].valid = NV_FALSE;
                //
                // TODO: This invalidate used to also invalidate all the
                // associated NPG engFOO units. This is probably not necessary
                // but code that bypasses the link valid check might touch the
                // underlying units when they are not supposed to.
                //
            }
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}
1627 
1628 //
1629 // Bring units out of warm reset on boot.  Used by driver load.
1630 //
1631 void
1632 nvswitch_init_warm_reset_lr10
1633 (
1634     nvswitch_device *device
1635 )
1636 {
1637     NvU32 idx_npg;
1638     NvU32 idx_nport;
1639     NvU32 nport_mask;
1640     NvU32 nport_disable = 0;
1641 
1642 #if defined(NV_NPG_WARMRESET_NPORTDISABLE)
1643     nport_disable = DRF_NUM(_NPG, _WARMRESET, _NPORTDISABLE, ~nport_mask);
1644 #endif
1645 
1646     //
1647     // Walk the NPGs and build the mask of extant NPORTs
1648     //
1649     for (idx_npg = 0; idx_npg < NVSWITCH_ENG_COUNT(device, NPG, ); idx_npg++)
1650     {
1651         if (NVSWITCH_ENG_IS_VALID(device, NPG, idx_npg))
1652         {
1653             nport_mask = 0;
1654             for (idx_nport = 0; idx_nport < NVSWITCH_NPORT_PER_NPG; idx_nport++)
1655             {
1656                 nport_mask |=
1657                     (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_npg*NVSWITCH_NPORT_PER_NPG + idx_nport) ?
1658                     NVBIT(idx_nport) : 0x0);
1659             }
1660 
1661             NVSWITCH_NPG_WR32_LR10(device, idx_npg,
1662                 _NPG, _WARMRESET,
1663                 nport_disable |
1664                 DRF_NUM(_NPG, _WARMRESET, _NPORTWARMRESET, nport_mask));
1665         }
1666     }
1667 }
1668 
1669 /*
1670  * CTRL_NVSWITCH_SET_REMAP_POLICY
1671  */
1672 
1673 NvlStatus
1674 nvswitch_get_remap_table_selector_lr10
1675 (
1676     nvswitch_device *device,
1677     NVSWITCH_TABLE_SELECT_REMAP table_selector,
1678     NvU32 *remap_ram_sel
1679 )
1680 {
1681     NvU32 ram_sel = 0;
1682 
1683     switch (table_selector)
1684     {
1685         case NVSWITCH_TABLE_SELECT_REMAP_PRIMARY:
1686             ram_sel = NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM;
1687             break;
1688         default:
1689             // Unsupported remap table selector
1690             return -NVL_ERR_NOT_SUPPORTED;
1691             break;
1692     }
1693 
1694     if (remap_ram_sel)
1695     {
1696         *remap_ram_sel = ram_sel;
1697     }
1698 
1699     return NVL_SUCCESS;
1700 }
1701 
1702 NvU32
1703 nvswitch_get_ingress_ram_size_lr10
1704 (
1705     nvswitch_device *device,
1706     NvU32 ingress_ram_selector      // NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECT*
1707 )
1708 {
1709     NvU32 ram_size = 0;
1710 
1711     switch (ingress_ram_selector)
1712     {
1713         case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM:
1714             ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_REMAPTAB_DEPTH + 1;
1715             break;
1716         case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM:
1717             ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RID_TAB_DEPTH + 1;
1718             break;
1719         case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM:
1720             ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RLAN_TAB_DEPTH + 1;
1721             break;
1722         default:
1723             // Unsupported ingress RAM selector
1724             break;
1725     }
1726 
1727     return ram_size;
1728 }
1729 
//
// Program numEntries REMAP policy entries into the ingress remap RAM of the
// given NPORT, starting at firstIndex. Caller is responsible for validating
// firstIndex/numEntries against the RAM size.
//
static void
_nvswitch_set_remap_policy_lr10
(
    nvswitch_device *device,
    NvU32 portNum,
    NvU32 firstIndex,
    NvU32 numEntries,
    NVSWITCH_REMAP_POLICY_ENTRY *remap_policy
)
{
    NvU32 i;
    NvU32 remap_address;
    NvU32 address_offset;
    NvU32 address_base;
    NvU32 address_limit;

    // Select the remap policy RAM and starting index; AUTO_INCR advances
    // the index after each entry is committed.
    NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    for (i = 0; i < numEntries; i++)
    {
        // Extract the physical-address fields from the 64-bit policy values.
        remap_address = DRF_VAL64(_INGRESS, _REMAP, _ADDR_PHYS_LR10, remap_policy[i].address);
        address_offset = DRF_VAL64(_INGRESS, _REMAP, _ADR_OFFSET_PHYS_LR10, remap_policy[i].addressOffset);
        address_base = DRF_VAL64(_INGRESS, _REMAP, _ADR_BASE_PHYS_LR10, remap_policy[i].addressBase);
        address_limit = DRF_VAL64(_INGRESS, _REMAP, _ADR_LIMIT_PHYS_LR10, remap_policy[i].addressLimit);

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA1,
            DRF_NUM(_INGRESS, _REMAPTABDATA1, _REQCTXT_MSK, remap_policy[i].reqCtxMask) |
            DRF_NUM(_INGRESS, _REMAPTABDATA1, _REQCTXT_CHK, remap_policy[i].reqCtxChk));
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA2,
            DRF_NUM(_INGRESS, _REMAPTABDATA2, _REQCTXT_REP, remap_policy[i].reqCtxRep) |
            DRF_NUM(_INGRESS, _REMAPTABDATA2, _ADR_OFFSET, address_offset));
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA3,
            DRF_NUM(_INGRESS, _REMAPTABDATA3, _ADR_BASE, address_base) |
            DRF_NUM(_INGRESS, _REMAPTABDATA3, _ADR_LIMIT, address_limit));
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA4,
            DRF_NUM(_INGRESS, _REMAPTABDATA4, _TGTID, remap_policy[i].targetId) |
            DRF_NUM(_INGRESS, _REMAPTABDATA4, _RFUNC, remap_policy[i].flags));

        // Write last and auto-increment
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA0,
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _RMAP_ADDR, remap_address) |
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _IRL_SEL, remap_policy[i].irlSelect) |
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _ACLVALID, remap_policy[i].entryValid));
    }
}
1779 
1780 NvlStatus
1781 nvswitch_ctrl_set_remap_policy_lr10
1782 (
1783     nvswitch_device *device,
1784     NVSWITCH_SET_REMAP_POLICY *p
1785 )
1786 {
1787     NvU32 i;
1788     NvU32 rfunc;
1789     NvU32 ram_size;
1790     NvlStatus retval = NVL_SUCCESS;
1791 
1792     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
1793     {
1794         NVSWITCH_PRINT(device, ERROR,
1795             "NPORT port #%d not valid\n",
1796             p->portNum);
1797         return -NVL_BAD_ARGS;
1798     }
1799 
1800     if (p->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY)
1801     {
1802         NVSWITCH_PRINT(device, ERROR,
1803             "Remap table #%d not supported\n",
1804             p->tableSelect);
1805         return -NVL_ERR_NOT_SUPPORTED;
1806     }
1807 
1808     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM);
1809     if ((p->firstIndex >= ram_size) ||
1810         (p->numEntries > NVSWITCH_REMAP_POLICY_ENTRIES_MAX) ||
1811         (p->firstIndex + p->numEntries > ram_size))
1812     {
1813         NVSWITCH_PRINT(device, ERROR,
1814             "remapPolicy[%d..%d] overflows range %d..%d or size %d.\n",
1815             p->firstIndex, p->firstIndex + p->numEntries - 1,
1816             0, ram_size - 1,
1817             NVSWITCH_REMAP_POLICY_ENTRIES_MAX);
1818         return -NVL_BAD_ARGS;
1819     }
1820 
1821     for (i = 0; i < p->numEntries; i++)
1822     {
1823         if (p->remapPolicy[i].targetId &
1824             ~DRF_MASK(NV_INGRESS_REMAPTABDATA4_TGTID))
1825         {
1826             NVSWITCH_PRINT(device, ERROR,
1827                 "remapPolicy[%d].targetId 0x%x out of valid range (0x%x..0x%x)\n",
1828                 i, p->remapPolicy[i].targetId,
1829                 0, DRF_MASK(NV_INGRESS_REMAPTABDATA4_TGTID));
1830             return -NVL_BAD_ARGS;
1831         }
1832 
1833         if (p->remapPolicy[i].irlSelect &
1834             ~DRF_MASK(NV_INGRESS_REMAPTABDATA0_IRL_SEL))
1835         {
1836             NVSWITCH_PRINT(device, ERROR,
1837                 "remapPolicy[%d].irlSelect 0x%x out of valid range (0x%x..0x%x)\n",
1838                 i, p->remapPolicy[i].irlSelect,
1839                 0, DRF_MASK(NV_INGRESS_REMAPTABDATA0_IRL_SEL));
1840             return -NVL_BAD_ARGS;
1841         }
1842 
1843         rfunc = p->remapPolicy[i].flags &
1844             (
1845                 NVSWITCH_REMAP_POLICY_FLAGS_REMAP_ADDR |
1846                 NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_CHECK |
1847                 NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_REPLACE |
1848                 NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE |
1849                 NVSWITCH_REMAP_POLICY_FLAGS_ADR_OFFSET
1850             );
1851         if (rfunc != p->remapPolicy[i].flags)
1852         {
1853             NVSWITCH_PRINT(device, ERROR,
1854                 "remapPolicy[%d].flags 0x%x has undefined flags (0x%x)\n",
1855                 i, p->remapPolicy[i].flags,
1856                 p->remapPolicy[i].flags ^ rfunc);
1857             return -NVL_BAD_ARGS;
1858         }
1859 
1860         // Validate that only bits 46:36 are used
1861         if (p->remapPolicy[i].address &
1862             ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADDR_PHYS_LR10))
1863         {
1864             NVSWITCH_PRINT(device, ERROR,
1865                 "remapPolicy[%d].address 0x%llx & ~0x%llx != 0\n",
1866                 i, p->remapPolicy[i].address,
1867                 DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADDR_PHYS_LR10));
1868             return -NVL_BAD_ARGS;
1869         }
1870 
1871         if (p->remapPolicy[i].reqCtxMask &
1872            ~DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK))
1873         {
1874             NVSWITCH_PRINT(device, ERROR,
1875                 "remapPolicy[%d].reqCtxMask 0x%x out of valid range (0x%x..0x%x)\n",
1876                 i, p->remapPolicy[i].reqCtxMask,
1877                 0, DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK));
1878             return -NVL_BAD_ARGS;
1879         }
1880 
1881         if (p->remapPolicy[i].reqCtxChk &
1882             ~DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK))
1883         {
1884             NVSWITCH_PRINT(device, ERROR,
1885                 "remapPolicy[%d].reqCtxChk 0x%x out of valid range (0x%x..0x%x)\n",
1886                 i, p->remapPolicy[i].reqCtxChk,
1887                 0, DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK));
1888             return -NVL_BAD_ARGS;
1889         }
1890 
1891         if (p->remapPolicy[i].reqCtxRep &
1892             ~DRF_MASK(NV_INGRESS_REMAPTABDATA2_REQCTXT_REP))
1893         {
1894             NVSWITCH_PRINT(device, ERROR,
1895                 "remapPolicy[%d].reqCtxRep 0x%x out of valid range (0x%x..0x%x)\n",
1896                 i, p->remapPolicy[i].reqCtxRep,
1897                 0, DRF_MASK(NV_INGRESS_REMAPTABDATA2_REQCTXT_REP));
1898             return -NVL_BAD_ARGS;
1899         }
1900 
1901         if ((p->remapPolicy[i].flags & NVSWITCH_REMAP_POLICY_FLAGS_ADR_OFFSET) &&
1902             !(p->remapPolicy[i].flags & NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE))
1903         {
1904             NVSWITCH_PRINT(device, ERROR,
1905                 "remapPolicy[%d].flags: _FLAGS_ADR_OFFSET should not be set if "
1906                 "_FLAGS_ADR_BASE is not set\n",
1907                 i);
1908             return -NVL_BAD_ARGS;
1909         }
1910 
1911         // Validate that only bits 35:20 are used
1912         if (p->remapPolicy[i].addressBase &
1913             ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_BASE_PHYS_LR10))
1914         {
1915             NVSWITCH_PRINT(device, ERROR,
1916                 "remapPolicy[%d].addressBase 0x%llx & ~0x%llx != 0\n",
1917                 i, p->remapPolicy[i].addressBase,
1918                 DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_BASE_PHYS_LR10));
1919             return -NVL_BAD_ARGS;
1920         }
1921 
1922         // Validate that only bits 35:20 are used
1923         if (p->remapPolicy[i].addressLimit &
1924             ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LR10))
1925         {
1926             NVSWITCH_PRINT(device, ERROR,
1927                  "remapPolicy[%d].addressLimit 0x%llx & ~0x%llx != 0\n",
1928                  i, p->remapPolicy[i].addressLimit,
1929                  DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LR10));
1930             return -NVL_BAD_ARGS;
1931         }
1932 
1933         // Validate base & limit describe a region
1934         if (p->remapPolicy[i].addressBase > p->remapPolicy[i].addressLimit)
1935         {
1936             NVSWITCH_PRINT(device, ERROR,
1937                  "remapPolicy[%d].addressBase/Limit invalid: 0x%llx > 0x%llx\n",
1938                  i, p->remapPolicy[i].addressBase, p->remapPolicy[i].addressLimit);
1939             return -NVL_BAD_ARGS;
1940         }
1941 
1942         // Validate that only bits 35:20 are used
1943         if (p->remapPolicy[i].addressOffset &
1944             ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10))
1945         {
1946             NVSWITCH_PRINT(device, ERROR,
1947                 "remapPolicy[%d].addressOffset 0x%llx & ~0x%llx != 0\n",
1948                 i, p->remapPolicy[i].addressOffset,
1949                 DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10));
1950             return -NVL_BAD_ARGS;
1951         }
1952 
1953         // Validate limit - base + offset doesn't overflow 64G
1954         if ((p->remapPolicy[i].addressLimit - p->remapPolicy[i].addressBase +
1955                 p->remapPolicy[i].addressOffset) &
1956             ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10))
1957         {
1958             NVSWITCH_PRINT(device, ERROR,
1959                 "remapPolicy[%d].addressLimit 0x%llx - addressBase 0x%llx + "
1960                 "addressOffset 0x%llx overflows 64GB\n",
1961                 i, p->remapPolicy[i].addressLimit, p->remapPolicy[i].addressBase,
1962                 p->remapPolicy[i].addressOffset);
1963             return -NVL_BAD_ARGS;
1964         }
1965     }
1966 
1967     _nvswitch_set_remap_policy_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->remapPolicy);
1968 
1969     return retval;
1970 }
1971 
1972 /*
1973  * CTRL_NVSWITCH_GET_REMAP_POLICY
1974  */
1975 
// Number of NV_INGRESS_REMAPTABDATA* registers that make up one REMAP entry
#define NVSWITCH_NUM_REMAP_POLICY_REGS_LR10 5
1977 
//
// Read back remap policy entries from the ingress REMAP policy RAM.
//
// Scans the RAM starting at params->firstIndex and copies each nonzero
// (i.e. programmed) entry into params->entry[]. Scanning stops once
// NVSWITCH_REMAP_POLICY_ENTRIES_MAX entries have been collected or the end of
// the RAM is reached. On return, params->nextIndex is the next unscanned RAM
// index (callers can iterate to read the whole table) and params->numEntries
// is the count of entries returned; numEntries may be smaller than the number
// of RAM slots scanned because all-zero slots are skipped.
//
// Returns NVL_SUCCESS, -NVL_BAD_ARGS for an invalid port or index, or
// -NVL_ERR_NOT_SUPPORTED for any table other than REMAP_PRIMARY.
//
NvlStatus
nvswitch_ctrl_get_remap_policy_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_REMAP_POLICY_PARAMS *params
)
{
    NVSWITCH_REMAP_POLICY_ENTRY *remap_policy;
    NvU32 remap_policy_data[NVSWITCH_NUM_REMAP_POLICY_REGS_LR10]; // 5 REMAP tables
    NvU32 table_index;
    NvU32 remap_count;
    NvU32 remap_address;
    NvU32 address_offset;
    NvU32 address_base;
    NvU32 address_limit;
    NvU32 ram_size;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "NPORT port #%d not valid\n",
            params->portNum);
        return -NVL_BAD_ARGS;
    }

    // LR10 implements only the primary remap table.
    if (params->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Remap table #%d not supported\n",
            params->tableSelect);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM);
    if ((params->firstIndex >= ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: remapPolicy first index %d out of range[%d..%d].\n",
            __FUNCTION__, params->firstIndex, 0, ram_size - 1);
        return -NVL_BAD_ARGS;
    }

    nvswitch_os_memset(params->entry, 0, (NVSWITCH_REMAP_POLICY_ENTRIES_MAX *
        sizeof(NVSWITCH_REMAP_POLICY_ENTRY)));

    table_index = params->firstIndex;
    remap_policy = params->entry;
    remap_count = 0;

    /* set table offset */
    // Auto-increment advances the RAM address as entries are accessed, so the
    // reads below walk consecutive RAM slots without re-writing the address.
    NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    while (remap_count < NVSWITCH_REMAP_POLICY_ENTRIES_MAX &&
        table_index < ram_size)
    {
        // NOTE(review): the register read order (DATA0 first) appears to be
        // required by the auto-increment behavior -- confirm against the
        // INGRESS RAM documentation before reordering.
        remap_policy_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA0);
        remap_policy_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA1);
        remap_policy_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA2);
        remap_policy_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA3);
        remap_policy_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA4);

        /* add to remap_entries list if nonzero */
        if (remap_policy_data[0] || remap_policy_data[1] || remap_policy_data[2] ||
            remap_policy_data[3] || remap_policy_data[4])
        {
            // Unpack each register field back into the API entry; 64-bit
            // address fields are reconstructed by shifting the register value
            // into its physical-address bit position.
            remap_policy[remap_count].irlSelect =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _IRL_SEL, remap_policy_data[0]);

            remap_policy[remap_count].entryValid =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _ACLVALID, remap_policy_data[0]);

            remap_address =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _RMAP_ADDR, remap_policy_data[0]);

            remap_policy[remap_count].address =
                DRF_NUM64(_INGRESS, _REMAP, _ADDR_PHYS_LR10, remap_address);

            remap_policy[remap_count].reqCtxMask =
                DRF_VAL(_INGRESS, _REMAPTABDATA1, _REQCTXT_MSK, remap_policy_data[1]);

            remap_policy[remap_count].reqCtxChk =
                DRF_VAL(_INGRESS, _REMAPTABDATA1, _REQCTXT_CHK, remap_policy_data[1]);

            remap_policy[remap_count].reqCtxRep =
                DRF_VAL(_INGRESS, _REMAPTABDATA2, _REQCTXT_REP, remap_policy_data[2]);

            address_offset =
                DRF_VAL(_INGRESS, _REMAPTABDATA2, _ADR_OFFSET, remap_policy_data[2]);

            remap_policy[remap_count].addressOffset =
                DRF_NUM64(_INGRESS, _REMAP, _ADR_OFFSET_PHYS_LR10, address_offset);

            address_base =
                DRF_VAL(_INGRESS, _REMAPTABDATA3, _ADR_BASE, remap_policy_data[3]);

            remap_policy[remap_count].addressBase =
                DRF_NUM64(_INGRESS, _REMAP, _ADR_BASE_PHYS_LR10, address_base);

            address_limit =
                DRF_VAL(_INGRESS, _REMAPTABDATA3, _ADR_LIMIT, remap_policy_data[3]);

            remap_policy[remap_count].addressLimit =
                DRF_NUM64(_INGRESS, _REMAP, _ADR_LIMIT_PHYS_LR10, address_limit);

            remap_policy[remap_count].targetId =
                DRF_VAL(_INGRESS, _REMAPTABDATA4, _TGTID, remap_policy_data[4]);

            remap_policy[remap_count].flags =
                DRF_VAL(_INGRESS, _REMAPTABDATA4, _RFUNC, remap_policy_data[4]);

            remap_count++;
        }

        table_index++;
    }

    params->nextIndex = table_index;
    params->numEntries = remap_count;

    return NVL_SUCCESS;
}
2102 
2103 /*
2104  * CTRL_NVSWITCH_SET_REMAP_POLICY_VALID
2105  */
NvlStatus
nvswitch_ctrl_set_remap_policy_valid_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_REMAP_POLICY_VALID *p
)
{
    NvU32 remap_ram;
    NvU32 ram_address = p->firstIndex;
    NvU32 remap_policy_data[NVSWITCH_NUM_REMAP_POLICY_REGS_LR10]; // 5 REMAP tables
    NvU32 i;
    NvU32 ram_size;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: NPORT port #%d not valid\n",
            __FUNCTION__, p->portNum);
        return -NVL_BAD_ARGS;
    }

    // LR10 implements only the primary remap table.
    if (p->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Remap table #%d not supported\n",
            p->tableSelect);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    // Range-check the requested span against the REMAP policy RAM depth.
    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM);
    if ((p->firstIndex >= ram_size) ||
        (p->numEntries > NVSWITCH_REMAP_POLICY_ENTRIES_MAX) ||
        (p->firstIndex + p->numEntries > ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: remapPolicy[%d..%d] overflows range %d..%d or size %d.\n",
            __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
            0, ram_size - 1,
            NVSWITCH_REMAP_POLICY_ENTRIES_MAX);
        return -NVL_BAD_ARGS;
    }

    // Select REMAPPOLICY RAM and disable auto-increment.
    // The RAM address is set explicitly per entry below because each entry
    // needs a full read-modify-write of the same slot.
    remap_ram =
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0);

    for (i = 0; i < p->numEntries; i++)
    {
        /* set the ram address */
        remap_ram = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, remap_ram);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, remap_ram);

        // Read the whole entry so every field except the valid bit is
        // preserved on write-back.
        remap_policy_data[0] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA0);
        remap_policy_data[1] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA1);
        remap_policy_data[2] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA2);
        remap_policy_data[3] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA3);
        remap_policy_data[4] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA4);

        // Set valid bit in REMAPTABDATA0.
        remap_policy_data[0] = FLD_SET_DRF_NUM(_INGRESS, _REMAPTABDATA0, _ACLVALID, p->entryValid[i], remap_policy_data[0]);

        // Write back in reverse order so DATA0 (which carries the valid bit)
        // is the final write of the entry, matching the programming order
        // used in _nvswitch_set_remap_policy_lr10.
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA4, remap_policy_data[4]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA3, remap_policy_data[3]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA2, remap_policy_data[2]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA1, remap_policy_data[1]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA0, remap_policy_data[0]);
    }

    return NVL_SUCCESS;
}
2177 
2178 //
2179 // Programming invalid entries to 0x3F causes Route block to detect an invalid port number
2180 // and flag a PRIV error to the FM. (See Table 14.RID RAM Programming, IAS 3.3.4)
2181 //
2182 
// Sentinel values programmed into RID port-list slots beyond numEntries:
// port 0x3F makes the Route block flag unused slots as invalid.
#define NVSWITCH_INVALID_PORT_VAL_LR10   0x3F
#define NVSWITCH_INVALID_VC_VAL_LR10     0x0

//
// Return the destination port / VC map for slot _idx of _entry's port list,
// or the invalid sentinel when _idx is beyond the populated portion.
// Arguments are fully parenthesized so expression arguments expand safely
// (note: _idx is still evaluated twice -- avoid side effects).
//
#define NVSWITCH_PORTLIST_PORT_LR10(_entry, _idx) \
    (((_idx) < (_entry).numEntries) ? (_entry).portList[(_idx)].destPortNum : NVSWITCH_INVALID_PORT_VAL_LR10)

#define NVSWITCH_PORTLIST_VC_LR10(_entry, _idx) \
    (((_idx) < (_entry).numEntries) ? (_entry).portList[(_idx)].vcMap : NVSWITCH_INVALID_VC_VAL_LR10)
2191 
2192 /*
2193  * CTRL_NVSWITCH_SET_ROUTING_ID
2194  */
2195 
//
// Program 'numEntries' routing ID (RID) entries into the ingress RID RAM of
// NPORT 'portNum', starting at RAM index 'firstIndex'.
//
// Each RID entry holds up to 16 (port, VC mode) pairs; slots beyond
// routing_id[i].numEntries are filled with the invalid-port sentinel via the
// NVSWITCH_PORTLIST_* macros. Callers are expected to have validated the
// entries beforehand.
//
static void
_nvswitch_set_routing_id_lr10
(
    nvswitch_device *device,
    NvU32 portNum,
    NvU32 firstIndex,
    NvU32 numEntries,
    NVSWITCH_ROUTING_ID_ENTRY *routing_id
)
{
    NvU32 i;
    NvU32 rmod;

    // Select the RID RAM, set the starting address, and enable auto-increment
    // so the address advances after each entry is written.
    NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    for (i = 0; i < numEntries; i++)
    {
        // Ports 3..5
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA1,
            DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT3,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 3)) |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE3, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 3))   |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT4,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 4)) |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE4, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 4))   |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT5,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 5)) |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE5, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 5)));

        // Ports 6..8
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA2,
            DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT6,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 6)) |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE6, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 6))   |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT7,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 7)) |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE7, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 7))   |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT8,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 8)) |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE8, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 8)));

        // Ports 9..11
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA3,
            DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT9,     NVSWITCH_PORTLIST_PORT_LR10(routing_id[i],  9)) |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE9,  NVSWITCH_PORTLIST_VC_LR10(routing_id[i],  9))   |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT10,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 10)) |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE10, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 10))   |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT11,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 11)) |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE11, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 11)));

        // Ports 12..14
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA4,
            DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT12,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 12)) |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE12, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 12))   |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT13,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 13)) |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE13, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 13))   |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT14,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 14)) |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE14, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 14)));

        // _RMOD bit layout (mirrored by the readback in
        // nvswitch_ctrl_get_routing_id_lr10): bit 6 = useRoutingLan,
        // bit 9 = enableIrlErrResponse.
        rmod =
            (routing_id[i].useRoutingLan ? NVBIT(6) : 0) |
            (routing_id[i].enableIrlErrResponse ? NVBIT(9) : 0);

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA5,
            DRF_NUM(_INGRESS, _RIDTABDATA5, _PORT15,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 15)) |
            DRF_NUM(_INGRESS, _RIDTABDATA5, _VC_MODE15, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 15))   |
            DRF_NUM(_INGRESS, _RIDTABDATA5, _RMOD,      rmod)                                           |
            DRF_NUM(_INGRESS, _RIDTABDATA5, _ACLVALID,  routing_id[i].entryValid));

        NVSWITCH_ASSERT(routing_id[i].numEntries <= 16);
        // Write last and auto-increment
        // (_GSIZE encodes a full 16-port group as 0; see the matching decode
        // in nvswitch_ctrl_get_routing_id_lr10.)
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA0,
            DRF_NUM(_INGRESS, _RIDTABDATA0, _GSIZE,
                (routing_id[i].numEntries == 16) ? 0x0 : routing_id[i].numEntries) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT0,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 0)) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE0, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 0))   |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT1,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 1)) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE1, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 1))   |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT2,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 2)) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE2, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 2)));
    }
}
2271 
// Number of NV_INGRESS_RIDTABDATA* registers that make up one RID entry
#define NVSWITCH_NUM_RIDTABDATA_REGS_LR10 6
2273 
//
// CTRL_NVSWITCH_GET_ROUTING_ID
//
// Read back routing ID (RID) entries from the ingress RID RAM.
//
// Scans the RAM starting at params->firstIndex and copies each nonzero
// (i.e. programmed) entry -- along with its RAM index -- into
// params->entries[]. Scanning stops once NVSWITCH_ROUTING_ID_ENTRIES_MAX
// entries have been collected or the end of the RAM is reached. On return,
// params->nextIndex is the next unscanned RAM index (callers can iterate to
// read the whole table) and params->numEntries is the count returned.
//
// Returns NVL_SUCCESS, or -NVL_BAD_ARGS for an invalid port or first index.
//
NvlStatus
nvswitch_ctrl_get_routing_id_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_ROUTING_ID_PARAMS *params
)
{
    NVSWITCH_ROUTING_ID_IDX_ENTRY *rid_entries;
    NvU32 table_index;
    NvU32 rid_tab_data[NVSWITCH_NUM_RIDTABDATA_REGS_LR10]; // 6 RID tables
    NvU32 rid_count;
    NvU32 rmod;
    NvU32 gsize;
    NvU32 ram_size;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: NPORT port #%d not valid\n",
            __FUNCTION__, params->portNum);
        return -NVL_BAD_ARGS;
    }

    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
    if (params->firstIndex >= ram_size)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: routingId first index %d out of range[%d..%d].\n",
            __FUNCTION__, params->firstIndex, 0, ram_size - 1);
        return -NVL_BAD_ARGS;
    }

    nvswitch_os_memset(params->entries, 0, sizeof(params->entries));

    table_index = params->firstIndex;
    rid_entries = params->entries;
    rid_count = 0;

    /* set table offset */
    // Auto-increment advances the RAM address as entries are accessed, so the
    // reads below walk consecutive RAM slots without re-writing the address.
    NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    while (rid_count < NVSWITCH_ROUTING_ID_ENTRIES_MAX &&
           table_index < ram_size)
    {
        rid_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA0);
        rid_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA1);
        rid_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA2);
        rid_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA3);
        rid_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA4);
        rid_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA5);

        /* add to rid_entries list if nonzero */
        if (rid_tab_data[0] || rid_tab_data[1] || rid_tab_data[2] ||
            rid_tab_data[3] || rid_tab_data[4] || rid_tab_data[5])
        {
            // Unpack the 16 (port, VC mode) pairs spread across DATA0..DATA5;
            // this mirrors the packing in _nvswitch_set_routing_id_lr10.
            rid_entries[rid_count].entry.portList[0].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT0, rid_tab_data[0]);
            rid_entries[rid_count].entry.portList[0].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE0, rid_tab_data[0]);

            rid_entries[rid_count].entry.portList[1].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT1, rid_tab_data[0]);
            rid_entries[rid_count].entry.portList[1].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE1, rid_tab_data[0]);

            rid_entries[rid_count].entry.portList[2].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT2, rid_tab_data[0]);
            rid_entries[rid_count].entry.portList[2].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE2, rid_tab_data[0]);

            rid_entries[rid_count].entry.portList[3].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT3, rid_tab_data[1]);
            rid_entries[rid_count].entry.portList[3].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE3, rid_tab_data[1]);

            rid_entries[rid_count].entry.portList[4].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT4, rid_tab_data[1]);
            rid_entries[rid_count].entry.portList[4].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE4, rid_tab_data[1]);

            rid_entries[rid_count].entry.portList[5].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT5, rid_tab_data[1]);
            rid_entries[rid_count].entry.portList[5].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE5, rid_tab_data[1]);

            rid_entries[rid_count].entry.portList[6].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT6, rid_tab_data[2]);
            rid_entries[rid_count].entry.portList[6].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE6, rid_tab_data[2]);

            rid_entries[rid_count].entry.portList[7].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT7, rid_tab_data[2]);
            rid_entries[rid_count].entry.portList[7].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE7, rid_tab_data[2]);

            rid_entries[rid_count].entry.portList[8].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT8, rid_tab_data[2]);
            rid_entries[rid_count].entry.portList[8].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE8, rid_tab_data[2]);

            rid_entries[rid_count].entry.portList[9].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT9, rid_tab_data[3]);
            rid_entries[rid_count].entry.portList[9].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE9, rid_tab_data[3]);

            rid_entries[rid_count].entry.portList[10].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT10, rid_tab_data[3]);
            rid_entries[rid_count].entry.portList[10].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE10, rid_tab_data[3]);

            rid_entries[rid_count].entry.portList[11].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT11, rid_tab_data[3]);
            rid_entries[rid_count].entry.portList[11].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE11, rid_tab_data[3]);

            rid_entries[rid_count].entry.portList[12].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT12, rid_tab_data[4]);
            rid_entries[rid_count].entry.portList[12].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE12, rid_tab_data[4]);

            rid_entries[rid_count].entry.portList[13].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT13, rid_tab_data[4]);
            rid_entries[rid_count].entry.portList[13].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE13, rid_tab_data[4]);

            rid_entries[rid_count].entry.portList[14].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT14, rid_tab_data[4]);
            rid_entries[rid_count].entry.portList[14].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE14, rid_tab_data[4]);

            rid_entries[rid_count].entry.portList[15].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA5, _PORT15, rid_tab_data[5]);
            rid_entries[rid_count].entry.portList[15].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA5, _VC_MODE15, rid_tab_data[5]);
            rid_entries[rid_count].entry.entryValid               = DRF_VAL(_INGRESS, _RIDTABDATA5, _ACLVALID, rid_tab_data[5]);

            // _RMOD bit layout (mirrors _nvswitch_set_routing_id_lr10):
            // bit 6 = useRoutingLan, bit 9 = enableIrlErrResponse.
            rmod = DRF_VAL(_INGRESS, _RIDTABDATA5, _RMOD, rid_tab_data[5]);
            rid_entries[rid_count].entry.useRoutingLan = (NVBIT(6) & rmod) ? 1 : 0;
            rid_entries[rid_count].entry.enableIrlErrResponse = (NVBIT(9) & rmod) ? 1 : 0;

            // Gsize of 16 falls into the 0th entry of GLT region. The _GSIZE field must be mapped accordingly
            // to the number of port entries (See IAS, Table 20, Sect 3.4.2.2. Packet Routing).
            gsize = DRF_VAL(_INGRESS, _RIDTABDATA0, _GSIZE, rid_tab_data[0]);
            rid_entries[rid_count].entry.numEntries = ((gsize == 0) ? 16 : gsize);

            rid_entries[rid_count].idx = table_index;
            rid_count++;
        }

        table_index++;
    }

    params->nextIndex = table_index;
    params->numEntries = rid_count;

    return NVL_SUCCESS;
}
2402 
2403 NvlStatus
2404 nvswitch_ctrl_set_routing_id_valid_lr10
2405 (
2406     nvswitch_device *device,
2407     NVSWITCH_SET_ROUTING_ID_VALID *p
2408 )
2409 {
2410     NvU32 rid_ctrl;
2411     NvU32 rid_tab_data0;
2412     NvU32 rid_tab_data1;
2413     NvU32 rid_tab_data2;
2414     NvU32 rid_tab_data3;
2415     NvU32 rid_tab_data4;
2416     NvU32 rid_tab_data5;
2417     NvU32 ram_address = p->firstIndex;
2418     NvU32 i;
2419     NvU32 ram_size;
2420 
2421     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
2422     {
2423         NVSWITCH_PRINT(device, ERROR,
2424             "%s: NPORT port #%d not valid\n",
2425             __FUNCTION__, p->portNum);
2426         return -NVL_BAD_ARGS;
2427     }
2428 
2429     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
2430     if ((p->firstIndex >= ram_size) ||
2431         (p->numEntries > NVSWITCH_ROUTING_ID_ENTRIES_MAX) ||
2432         (p->firstIndex + p->numEntries > ram_size))
2433     {
2434         NVSWITCH_PRINT(device, ERROR,
2435             "%s: routingId[%d..%d] overflows range %d..%d or size %d.\n",
2436             __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
2437             0, ram_size - 1,
2438             NVSWITCH_ROUTING_ID_ENTRIES_MAX);
2439         return -NVL_BAD_ARGS;
2440     }
2441 
2442     // Select RID RAM and disable Auto Increment.
2443     rid_ctrl =
2444         DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) |
2445         DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0);
2446 
2447 
2448     for (i = 0; i < p->numEntries; i++)
2449     {
2450         /* set the ram address */
2451         rid_ctrl = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, rid_ctrl);
2452         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, rid_ctrl);
2453 
2454         rid_tab_data0 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA0);
2455         rid_tab_data1 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA1);
2456         rid_tab_data2 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA2);
2457         rid_tab_data3 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA3);
2458         rid_tab_data4 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA4);
2459         rid_tab_data5 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA5);
2460 
2461         // Set the valid bit in _RIDTABDATA5
2462         rid_tab_data5 = FLD_SET_DRF_NUM(_INGRESS, _RIDTABDATA5, _ACLVALID,
2463             p->entryValid[i], rid_tab_data5);
2464 
2465         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA1, rid_tab_data1);
2466         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA2, rid_tab_data2);
2467         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA3, rid_tab_data3);
2468         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA4, rid_tab_data4);
2469         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA5, rid_tab_data5);
2470         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA0, rid_tab_data0);
2471     }
2472 
2473     return NVL_SUCCESS;
2474 }
2475 
2476 NvlStatus
2477 nvswitch_ctrl_set_routing_id_lr10
2478 (
2479     nvswitch_device *device,
2480     NVSWITCH_SET_ROUTING_ID *p
2481 )
2482 {
2483     NvU32 i, j;
2484     NvlStatus retval = NVL_SUCCESS;
2485     NvU32 ram_size;
2486 
2487     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
2488     {
2489         NVSWITCH_PRINT(device, ERROR,
2490             "NPORT port #%d not valid\n",
2491             p->portNum);
2492         return -NVL_BAD_ARGS;
2493     }
2494 
2495     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
2496     if ((p->firstIndex >= ram_size) ||
2497         (p->numEntries > NVSWITCH_ROUTING_ID_ENTRIES_MAX) ||
2498         (p->firstIndex + p->numEntries > ram_size))
2499     {
2500         NVSWITCH_PRINT(device, ERROR,
2501             "routingId[%d..%d] overflows range %d..%d or size %d.\n",
2502             p->firstIndex, p->firstIndex + p->numEntries - 1,
2503             0, ram_size - 1,
2504             NVSWITCH_ROUTING_ID_ENTRIES_MAX);
2505         return -NVL_BAD_ARGS;
2506     }
2507 
2508     for (i = 0; i < p->numEntries; i++)
2509     {
2510         if ((p->routingId[i].numEntries < 1) ||
2511             (p->routingId[i].numEntries > NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX))
2512         {
2513             NVSWITCH_PRINT(device, ERROR,
2514                 "routingId[%d].portList[] size %d overflows range %d..%d\n",
2515                 i, p->routingId[i].numEntries,
2516                 1, NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX);
2517             return -NVL_BAD_ARGS;
2518         }
2519 
2520         for (j = 0; j < p->routingId[i].numEntries; j++)
2521         {
2522             if (p->routingId[i].portList[j].vcMap > DRF_MASK(NV_INGRESS_RIDTABDATA0_VC_MODE0))
2523             {
2524                 NVSWITCH_PRINT(device, ERROR,
2525                     "routingId[%d].portList[%d] vcMap 0x%x out of valid range (0x%x..0x%x)\n",
2526                     i, j,
2527                     p->routingId[i].portList[j].vcMap,
2528                     0, DRF_MASK(NV_INGRESS_RIDTABDATA0_VC_MODE0));
2529                 return -NVL_BAD_ARGS;
2530             }
2531 
2532             if (p->routingId[i].portList[j].destPortNum > DRF_MASK(NV_INGRESS_RIDTABDATA0_PORT0))
2533             {
2534                 NVSWITCH_PRINT(device, ERROR,
2535                     "routingId[%d].portList[%d] destPortNum 0x%x out of valid range (0x%x..0x%x)\n",
2536                     i, j,
2537                     p->routingId[i].portList[j].destPortNum,
2538                     0, DRF_MASK(NV_INGRESS_RIDTABDATA0_PORT0));
2539                 return -NVL_BAD_ARGS;
2540             }
2541         }
2542     }
2543 
2544     _nvswitch_set_routing_id_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->routingId);
2545 
2546     return retval;
2547 }
2548 
2549 /*
2550  * CTRL_NVSWITCH_SET_ROUTING_LAN
2551  */
2552 
2553 //
2554 // Check the data field is present in the list.  Return either the data field
2555 // or default if not present.
2556 //
2557 #define NVSWITCH_PORTLIST_VALID_LR10(_entry, _idx, _field, _default) \
2558     ((_idx < _entry.numEntries) ? _entry.portList[_idx]._field  : _default)
2559 
//
// Program 'numEntries' RLAN table entries on 'portNum' starting at table
// offset 'firstIndex'.  Arguments are assumed to be pre-validated by the
// caller (see nvswitch_ctrl_set_routing_lan_lr10).
//
// Each entry spans six _RLANTABDATA* registers; _RLANTABDATA0 is written
// last and auto-increment then advances the RAM address to the next entry.
// portList[] slots beyond numEntries are programmed with select 0 and
// size 1 via NVSWITCH_PORTLIST_VALID_LR10's defaults.
//
static void
_nvswitch_set_routing_lan_lr10
(
    nvswitch_device *device,
    NvU32 portNum,
    NvU32 firstIndex,
    NvU32 numEntries,
    NVSWITCH_ROUTING_LAN_ENTRY *routing_lan
)
{
    NvU32 i;

    // Select the RLAN RAM at firstIndex with auto-increment enabled.
    NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    for (i = 0; i < numEntries; i++)
    {
        //
        // NOTE: The GRP_SIZE field is 4-bits.  A subgroup is size 1 through 16
        // with encoding 0x0=16 and 0x1=1, ..., 0xF=15.
        // Programming of GRP_SIZE takes advantage of the inherent masking of
        // DRF_NUM to truncate 16 to 0.
        // See bug #3300673
        //

        // Ports 3..5
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA1,
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_3, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 3, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_3, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 3, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_4, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 4, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_4, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 4, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_5, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 5, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_5, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 5, groupSize, 1)));

        // Ports 6..8
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA2,
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_6, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 6, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_6, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 6, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_7, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 7, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_7, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 7, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_8, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 8, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_8, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 8, groupSize, 1)));

        // Ports 9..11
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA3,
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_9, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 9, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_9, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 9, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_10, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 10, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_10, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 10, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_11, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 11, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_11, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 11, groupSize, 1)));

        // Ports 12..14
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA4,
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_12, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 12, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_12, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 12, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_13, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 13, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_13, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 13, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_14, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 14, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_14, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 14, groupSize, 1)));

        // Port 15 plus the entry-valid bit
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA5,
            DRF_NUM(_INGRESS, _RLANTABDATA5, _GRP_SEL_15, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 15, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA5, _GRP_SIZE_15, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 15, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA5, _ACLVALID,  routing_lan[i].entryValid));

        // Write last and auto-increment
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA0,
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_0, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 0, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_0, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 0, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_1, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 1, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_1, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 1, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_2, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 2, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_2, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 2, groupSize, 1)));
    }
}
2634 
2635 NvlStatus
2636 nvswitch_ctrl_set_routing_lan_lr10
2637 (
2638     nvswitch_device *device,
2639     NVSWITCH_SET_ROUTING_LAN *p
2640 )
2641 {
2642     NvU32 i, j;
2643     NvlStatus retval = NVL_SUCCESS;
2644     NvU32 ram_size;
2645 
2646     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
2647     {
2648         NVSWITCH_PRINT(device, ERROR,
2649             "%s: NPORT port #%d not valid\n",
2650             __FUNCTION__, p->portNum);
2651         return -NVL_BAD_ARGS;
2652     }
2653 
2654     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
2655     if ((p->firstIndex >= ram_size) ||
2656         (p->numEntries > NVSWITCH_ROUTING_LAN_ENTRIES_MAX) ||
2657         (p->firstIndex + p->numEntries > ram_size))
2658     {
2659         NVSWITCH_PRINT(device, ERROR,
2660             "%s: routingLan[%d..%d] overflows range %d..%d or size %d.\n",
2661             __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
2662             0, ram_size - 1,
2663             NVSWITCH_ROUTING_LAN_ENTRIES_MAX);
2664         return -NVL_BAD_ARGS;
2665     }
2666 
2667     for (i = 0; i < p->numEntries; i++)
2668     {
2669         if (p->routingLan[i].numEntries > NVSWITCH_ROUTING_LAN_GROUP_SEL_MAX)
2670         {
2671             NVSWITCH_PRINT(device, ERROR,
2672                 "%s: routingLan[%d].portList[] size %d overflows range %d..%d\n",
2673                 __FUNCTION__, i, p->routingLan[i].numEntries,
2674                 0, NVSWITCH_ROUTING_LAN_GROUP_SEL_MAX);
2675             return -NVL_BAD_ARGS;
2676         }
2677 
2678         for (j = 0; j < p->routingLan[i].numEntries; j++)
2679         {
2680             if (p->routingLan[i].portList[j].groupSelect > DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SEL_0))
2681             {
2682                 NVSWITCH_PRINT(device, ERROR,
2683                     "%s: routingLan[%d].portList[%d] groupSelect 0x%x out of valid range (0x%x..0x%x)\n",
2684                     __FUNCTION__, i, j,
2685                     p->routingLan[i].portList[j].groupSelect,
2686                     0, DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SEL_0));
2687                 return -NVL_BAD_ARGS;
2688             }
2689 
2690             if ((p->routingLan[i].portList[j].groupSize == 0) ||
2691                 (p->routingLan[i].portList[j].groupSize > DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SIZE_0) + 1))
2692             {
2693                 NVSWITCH_PRINT(device, ERROR,
2694                     "%s: routingLan[%d].portList[%d] groupSize 0x%x out of valid range (0x%x..0x%x)\n",
2695                     __FUNCTION__, i, j,
2696                     p->routingLan[i].portList[j].groupSize,
2697                     1, DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SIZE_0) + 1);
2698                 return -NVL_BAD_ARGS;
2699             }
2700         }
2701     }
2702 
2703     _nvswitch_set_routing_lan_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->routingLan);
2704 
2705     return retval;
2706 }
2707 
// Number of NV_INGRESS_RLANTABDATA* registers making up one RLAN table entry
#define NVSWITCH_NUM_RLANTABDATA_REGS_LR10 6
2709 
2710 NvlStatus
2711 nvswitch_ctrl_get_routing_lan_lr10
2712 (
2713     nvswitch_device *device,
2714     NVSWITCH_GET_ROUTING_LAN_PARAMS *params
2715 )
2716 {
2717     NVSWITCH_ROUTING_LAN_IDX_ENTRY *rlan_entries;
2718     NvU32 table_index;
2719     NvU32 rlan_tab_data[NVSWITCH_NUM_RLANTABDATA_REGS_LR10]; // 6 RLAN tables
2720     NvU32 rlan_count;
2721     NvU32 ram_size;
2722 
2723     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
2724     {
2725         NVSWITCH_PRINT(device, ERROR,
2726             "%s: NPORT port #%d not valid\n",
2727             __FUNCTION__, params->portNum);
2728         return -NVL_BAD_ARGS;
2729     }
2730 
2731     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
2732     if ((params->firstIndex >= ram_size))
2733     {
2734         NVSWITCH_PRINT(device, ERROR,
2735             "%s: routingLan first index %d out of range[%d..%d].\n",
2736             __FUNCTION__, params->firstIndex, 0, ram_size - 1);
2737         return -NVL_BAD_ARGS;
2738     }
2739 
2740     nvswitch_os_memset(params->entries, 0, (NVSWITCH_ROUTING_LAN_ENTRIES_MAX *
2741         sizeof(NVSWITCH_ROUTING_LAN_IDX_ENTRY)));
2742 
2743     table_index = params->firstIndex;
2744     rlan_entries = params->entries;
2745     rlan_count = 0;
2746 
2747     /* set table offset */
2748     NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
2749         DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) |
2750         DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM)   |
2751         DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));
2752 
2753     while (rlan_count < NVSWITCH_ROUTING_LAN_ENTRIES_MAX &&
2754            table_index < ram_size)
2755     {
2756         /* read one entry */
2757         rlan_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA0);
2758         rlan_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA1);
2759         rlan_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA2);
2760         rlan_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA3);
2761         rlan_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA4);
2762         rlan_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA5);
2763 
2764         /* add to rlan_entries list if nonzero */
2765         if (rlan_tab_data[0] || rlan_tab_data[1] || rlan_tab_data[2] ||
2766             rlan_tab_data[3] || rlan_tab_data[4] || rlan_tab_data[5])
2767         {
2768             rlan_entries[rlan_count].entry.portList[0].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_0, rlan_tab_data[0]);
2769             rlan_entries[rlan_count].entry.portList[0].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_0, rlan_tab_data[0]);
2770             if (rlan_entries[rlan_count].entry.portList[0].groupSize == 0)
2771             {
2772                 rlan_entries[rlan_count].entry.portList[0].groupSize = 16;
2773             }
2774 
2775             rlan_entries[rlan_count].entry.portList[1].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_1, rlan_tab_data[0]);
2776             rlan_entries[rlan_count].entry.portList[1].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_1, rlan_tab_data[0]);
2777             if (rlan_entries[rlan_count].entry.portList[1].groupSize == 0)
2778             {
2779                 rlan_entries[rlan_count].entry.portList[1].groupSize = 16;
2780             }
2781 
2782             rlan_entries[rlan_count].entry.portList[2].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_2, rlan_tab_data[0]);
2783             rlan_entries[rlan_count].entry.portList[2].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_2, rlan_tab_data[0]);
2784             if (rlan_entries[rlan_count].entry.portList[2].groupSize == 0)
2785             {
2786                 rlan_entries[rlan_count].entry.portList[2].groupSize = 16;
2787             }
2788 
2789             rlan_entries[rlan_count].entry.portList[3].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_3, rlan_tab_data[1]);
2790             rlan_entries[rlan_count].entry.portList[3].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_3, rlan_tab_data[1]);
2791             if (rlan_entries[rlan_count].entry.portList[3].groupSize == 0)
2792             {
2793                 rlan_entries[rlan_count].entry.portList[3].groupSize = 16;
2794             }
2795 
2796             rlan_entries[rlan_count].entry.portList[4].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_4, rlan_tab_data[1]);
2797             rlan_entries[rlan_count].entry.portList[4].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_4, rlan_tab_data[1]);
2798             if (rlan_entries[rlan_count].entry.portList[4].groupSize == 0)
2799             {
2800                 rlan_entries[rlan_count].entry.portList[4].groupSize = 16;
2801             }
2802 
2803             rlan_entries[rlan_count].entry.portList[5].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_5, rlan_tab_data[1]);
2804             rlan_entries[rlan_count].entry.portList[5].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_5, rlan_tab_data[1]);
2805             if (rlan_entries[rlan_count].entry.portList[5].groupSize == 0)
2806             {
2807                 rlan_entries[rlan_count].entry.portList[5].groupSize = 16;
2808             }
2809 
2810             rlan_entries[rlan_count].entry.portList[6].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_6, rlan_tab_data[2]);
2811             rlan_entries[rlan_count].entry.portList[6].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_6, rlan_tab_data[2]);
2812             if (rlan_entries[rlan_count].entry.portList[6].groupSize == 0)
2813             {
2814                 rlan_entries[rlan_count].entry.portList[6].groupSize = 16;
2815             }
2816 
2817             rlan_entries[rlan_count].entry.portList[7].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_7, rlan_tab_data[2]);
2818             rlan_entries[rlan_count].entry.portList[7].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_7, rlan_tab_data[2]);
2819             if (rlan_entries[rlan_count].entry.portList[7].groupSize == 0)
2820             {
2821                 rlan_entries[rlan_count].entry.portList[7].groupSize = 16;
2822             }
2823 
2824             rlan_entries[rlan_count].entry.portList[8].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_8, rlan_tab_data[2]);
2825             rlan_entries[rlan_count].entry.portList[8].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_8, rlan_tab_data[2]);
2826             if (rlan_entries[rlan_count].entry.portList[8].groupSize == 0)
2827             {
2828                 rlan_entries[rlan_count].entry.portList[8].groupSize = 16;
2829             }
2830 
2831             rlan_entries[rlan_count].entry.portList[9].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_9, rlan_tab_data[3]);
2832             rlan_entries[rlan_count].entry.portList[9].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_9, rlan_tab_data[3]);
2833             if (rlan_entries[rlan_count].entry.portList[9].groupSize == 0)
2834             {
2835                 rlan_entries[rlan_count].entry.portList[9].groupSize = 16;
2836             }
2837 
2838             rlan_entries[rlan_count].entry.portList[10].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_10, rlan_tab_data[3]);
2839             rlan_entries[rlan_count].entry.portList[10].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_10, rlan_tab_data[3]);
2840             if (rlan_entries[rlan_count].entry.portList[10].groupSize == 0)
2841             {
2842                 rlan_entries[rlan_count].entry.portList[10].groupSize = 16;
2843             }
2844 
2845             rlan_entries[rlan_count].entry.portList[11].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_11, rlan_tab_data[3]);
2846             rlan_entries[rlan_count].entry.portList[11].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_11, rlan_tab_data[3]);
2847             if (rlan_entries[rlan_count].entry.portList[11].groupSize == 0)
2848             {
2849                 rlan_entries[rlan_count].entry.portList[11].groupSize = 16;
2850             }
2851 
2852             rlan_entries[rlan_count].entry.portList[12].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_12, rlan_tab_data[4]);
2853             rlan_entries[rlan_count].entry.portList[12].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_12, rlan_tab_data[4]);
2854             if (rlan_entries[rlan_count].entry.portList[12].groupSize == 0)
2855             {
2856                 rlan_entries[rlan_count].entry.portList[12].groupSize = 16;
2857             }
2858 
2859             rlan_entries[rlan_count].entry.portList[13].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_13, rlan_tab_data[4]);
2860             rlan_entries[rlan_count].entry.portList[13].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_13, rlan_tab_data[4]);
2861             if (rlan_entries[rlan_count].entry.portList[13].groupSize == 0)
2862             {
2863                 rlan_entries[rlan_count].entry.portList[13].groupSize = 16;
2864             }
2865 
2866             rlan_entries[rlan_count].entry.portList[14].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_14, rlan_tab_data[4]);
2867             rlan_entries[rlan_count].entry.portList[14].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_14, rlan_tab_data[4]);
2868             if (rlan_entries[rlan_count].entry.portList[14].groupSize == 0)
2869             {
2870                 rlan_entries[rlan_count].entry.portList[14].groupSize = 16;
2871             }
2872 
2873             rlan_entries[rlan_count].entry.portList[15].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA5, _GRP_SEL_15, rlan_tab_data[5]);
2874             rlan_entries[rlan_count].entry.portList[15].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA5, _GRP_SIZE_15, rlan_tab_data[5]);
2875             if (rlan_entries[rlan_count].entry.portList[15].groupSize == 0)
2876             {
2877                 rlan_entries[rlan_count].entry.portList[15].groupSize = 16;
2878             }
2879 
2880             rlan_entries[rlan_count].entry.entryValid               = DRF_VAL(_INGRESS, _RLANTABDATA5, _ACLVALID, rlan_tab_data[5]);
2881             rlan_entries[rlan_count].entry.numEntries = NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX;
2882             rlan_entries[rlan_count].idx  = table_index;
2883 
2884             rlan_count++;
2885         }
2886 
2887         table_index++;
2888     }
2889 
2890     params->nextIndex  = table_index;
2891     params->numEntries = rlan_count;
2892 
2893     return NVL_SUCCESS;
2894 }
2895 
//
// Set or clear the ACL-valid bit of existing RLAN table entries,
// preserving all other entry fields via read-modify-write.
//
NvlStatus
nvswitch_ctrl_set_routing_lan_valid_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_ROUTING_LAN_VALID *p
)
{
    NvU32 rlan_ctrl;
    NvU32 rlan_tab_data[NVSWITCH_NUM_RLANTABDATA_REGS_LR10]; // 6 RLAN tables
    NvU32 ram_address = p->firstIndex;
    NvU32 i;
    NvU32 ram_size;

    // The target NPORT must exist on this device.
    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: NPORT port #%d not valid\n",
            __FUNCTION__, p->portNum);
        return -NVL_BAD_ARGS;
    }

    // The requested window must lie entirely inside the RLAN RAM.
    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
    if ((p->firstIndex >= ram_size) ||
        (p->numEntries > NVSWITCH_ROUTING_LAN_ENTRIES_MAX) ||
        (p->firstIndex + p->numEntries > ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: routingLan[%d..%d] overflows range %d..%d or size %d.\n",
            __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
            0, ram_size - 1,
            NVSWITCH_ROUTING_LAN_ENTRIES_MAX);
        return -NVL_BAD_ARGS;
    }

    // Select RLAN RAM and disable Auto Increment; the RAM address is set
    // explicitly for every entry below.
    rlan_ctrl =
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0);

    for (i = 0; i < p->numEntries; i++)
    {
        /* set the RAM address */
        rlan_ctrl = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, rlan_ctrl);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, rlan_ctrl);

        // Read the whole entry so everything except the valid bit is
        // written back unchanged.
        rlan_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA0);
        rlan_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA1);
        rlan_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA2);
        rlan_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA3);
        rlan_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA4);
        rlan_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA5);

        // Set the valid bit in _RLANTABDATA5
        rlan_tab_data[5] = FLD_SET_DRF_NUM(_INGRESS, _RLANTABDATA5, _ACLVALID,
            p->entryValid[i], rlan_tab_data[5]);

        // _RLANTABDATA0 is written last -- presumably the DATA0 write
        // commits the entry (matches _nvswitch_set_routing_lan_lr10's
        // "write last" ordering).
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA1, rlan_tab_data[1]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA2, rlan_tab_data[2]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA3, rlan_tab_data[3]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA4, rlan_tab_data[4]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA5, rlan_tab_data[5]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA0, rlan_tab_data[0]);
    }

    return NVL_SUCCESS;
}
2962 
2963 /*
2964  * @Brief : Send priv ring command and wait for completion
2965  *
2966  * @Description :
2967  *
2968  * @param[in] device        a reference to the device to initialize
2969  * @param[in] cmd           encoded priv ring command
2970  */
2971 NvlStatus
2972 nvswitch_ring_master_cmd_lr10
2973 (
2974     nvswitch_device *device,
2975     NvU32 cmd
2976 )
2977 {
2978     NvU32 value;
2979     NVSWITCH_TIMEOUT timeout;
2980     NvBool           keepPolling;
2981 
2982     NVSWITCH_REG_WR32(device, _PPRIV_MASTER, _RING_COMMAND, cmd);
2983 
2984     nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout);
2985     do
2986     {
2987         keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
2988 
2989         value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_COMMAND);
2990         if (FLD_TEST_DRF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _NO_CMD, value))
2991         {
2992             break;
2993         }
2994 
2995         nvswitch_os_sleep(1);
2996     }
2997     while (keepPolling);
2998 
2999     if (!FLD_TEST_DRF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _NO_CMD, value))
3000     {
3001         NVSWITCH_PRINT(device, ERROR,
3002             "%s: Timeout waiting for RING_COMMAND == NO_CMD (cmd=0x%x).\n",
3003             __FUNCTION__, cmd);
3004         return -NVL_INITIALIZATION_TOTAL_FAILURE;
3005     }
3006 
3007     return NVL_SUCCESS;
3008 }
3009 
3010 /*
3011  * @brief Process the information read from ROM tables and apply it to device
3012  * settings.
3013  *
3014  * @param[in] device    a reference to the device to query
3015  * @param[in] firmware  Information parsed from ROM tables
3016  */
3017 static void
3018 _nvswitch_process_firmware_info_lr10
3019 (
3020     nvswitch_device *device,
3021     NVSWITCH_FIRMWARE *firmware
3022 )
3023 {
3024     NvU32 idx_link;
3025     NvU64 link_enable_mask;
3026 
3027     if (device->firmware.firmware_size == 0)
3028     {
3029         return;
3030     }
3031 
3032     if (device->firmware.nvlink.link_config_found)
3033     {
3034         link_enable_mask = ((NvU64)device->regkeys.link_enable_mask2 << 32 |
3035                             (NvU64)device->regkeys.link_enable_mask);
3036         //
3037         // If the link enables were not already overridden by regkey, then
3038         // apply the ROM link enables
3039         //
3040         if (link_enable_mask == NV_U64_MAX)
3041         {
3042             for (idx_link = 0; idx_link < nvswitch_get_num_links(device); idx_link++)
3043             {
3044                 if ((device->firmware.nvlink.link_enable_mask & NVBIT64(idx_link)) == 0)
3045                 {
3046                     device->link[idx_link].valid = NV_FALSE;
3047                 }
3048             }
3049         }
3050     }
3051 }
3052 
3053 void
3054 nvswitch_init_npg_multicast_lr10
3055 (
3056     nvswitch_device *device
3057 )
3058 {
3059     NvU32 idx_npg;
3060     NvU32 idx_nport;
3061     NvU32 nport_mask;
3062 
3063     //
3064     // Walk the NPGs and build the mask of extant NPORTs
3065     //
3066     for (idx_npg = 0; idx_npg < NVSWITCH_ENG_COUNT(device, NPG, ); idx_npg++)
3067     {
3068         if (NVSWITCH_ENG_IS_VALID(device, NPG, idx_npg))
3069         {
3070             nport_mask = 0;
3071             for (idx_nport = 0; idx_nport < NVSWITCH_NPORT_PER_NPG; idx_nport++)
3072             {
3073                 nport_mask |=
3074                     (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_npg*NVSWITCH_NPORT_PER_NPG + idx_nport) ?
3075                     NVBIT(idx_nport) : 0x0);
3076             }
3077 
3078             NVSWITCH_NPG_WR32_LR10(device, idx_npg,
3079                 _NPG, _CTRL_PRI_MULTICAST,
3080                 DRF_NUM(_NPG, _CTRL_PRI_MULTICAST, _NPORT_ENABLE, nport_mask) |
3081                 DRF_DEF(_NPG, _CTRL_PRI_MULTICAST, _READ_MODE, _AND_ALL_BUSSES));
3082 
3083             NVSWITCH_NPGPERF_WR32_LR10(device, idx_npg,
3084                 _NPGPERF, _CTRL_PRI_MULTICAST,
3085                 DRF_NUM(_NPGPERF, _CTRL_PRI_MULTICAST, _NPORT_ENABLE, nport_mask) |
3086                 DRF_DEF(_NPGPERF, _CTRL_PRI_MULTICAST, _READ_MODE, _AND_ALL_BUSSES));
3087         }
3088     }
3089 }
3090 
//
// Trigger hardware zero-initialization of the per-NPORT RAMs (tag pools,
// link/remap/RID/RLAN tables) on all NPORTs, then poll each NPORT until it
// reports completion or a 25ms timeout expires.
//
// @param[in] device    a reference to the device being initialized
//
// @returns NVL_SUCCESS, or -NVL_ERR_INVALID_STATE if any NPORT did not
//          complete in time (execution deliberately falls through and still
//          programs the crumbstore RAM address below — see Bug 2974064).
//
static NvlStatus
nvswitch_clear_nport_rams_lr10
(
    nvswitch_device *device
)
{
    NvU32 idx_nport;
    NvU64 nport_mask = 0;       // NPORTs whose init has not yet completed
    NvU32 zero_init_mask;
    NvU32 val;
    NVSWITCH_TIMEOUT timeout;
    NvBool           keepPolling;
    NvlStatus retval = NVL_SUCCESS;

    // Build the mask of available NPORTs
    for (idx_nport = 0; idx_nport < NVSWITCH_ENG_COUNT(device, NPORT, ); idx_nport++)
    {
        if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport))
        {
            nport_mask |= NVBIT64(idx_nport);
        }
    }

    // Start the HW zero init
    zero_init_mask =
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_1, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_2, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_3, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_4, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_5, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_6, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _LINKTABLEINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _REMAPTABINIT,  _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _RIDTABINIT,    _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _RLANTABINIT,   _HWINIT);

    // Kick off init on every NPORT at once via the broadcast write.
    NVSWITCH_BCAST_WR32_LR10(device, NPORT, _NPORT, _INITIALIZATION,
        zero_init_mask);

    nvswitch_timeout_create(25*NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);

    do
    {
        keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;

        // Check each enabled NPORT that is still pending until all are done
        for (idx_nport = 0; idx_nport < NVSWITCH_ENG_COUNT(device, NPORT, ); idx_nport++)
        {
            if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport) && (nport_mask & NVBIT64(idx_nport)))
            {
                // Completion is signaled by the register reading back exactly
                // the mask that was written above.
                val = NVSWITCH_ENG_RD32_LR10(device, NPORT, idx_nport, _NPORT, _INITIALIZATION);
                if (val == zero_init_mask)
                {
                    nport_mask &= ~NVBIT64(idx_nport);
                }
            }
        }

        if (nport_mask == 0)
        {
            break;
        }

        nvswitch_os_sleep(1);
    }
    while (keepPolling);

    if (nport_mask != 0)
    {
        NVSWITCH_PRINT(device, WARN,
            "%s: Timeout waiting for NV_NPORT_INITIALIZATION (0x%llx)\n",
            __FUNCTION__, nport_mask);
        // Bug 2974064: Review this timeout handling (fall through)
        retval = -NVL_ERR_INVALID_STATE;
    }

    //bug 2737147 requires SW To init this crumbstore setting for LR10
    // Point the TSTATE RAM address at the crumbstore RAM for VC5 (TRANSDONE),
    // address 0, no auto-increment, on all NPORTs.
    val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0)             |
          DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CRUMBSTORE_RAM) |
          DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 0)        |
          DRF_DEF(_TSTATE, _RAM_ADDRESS, _VC, _VC5_TRANSDONE);

    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _RAM_ADDRESS, val);

    return retval;
}
3178 
3179 static void
3180 _nvswitch_init_nport_ecc_control_lr10
3181 (
3182     nvswitch_device *device
3183 )
3184 {
3185     // Set ingress ECC error limits
3186     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER,
3187         DRF_NUM(_INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3188     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT, 1);
3189 
3190     // Set egress ECC error limits
3191     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER,
3192         DRF_NUM(_EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3193     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT, 1);
3194 
3195     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER,
3196         DRF_NUM(_EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3197     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT, 1);
3198 
3199     // Set route ECC error limits
3200     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER,
3201         DRF_NUM(_ROUTE, _ERR_NVS_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3202     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER_LIMIT, 1);
3203 
3204     // Set tstate ECC error limits
3205     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER,
3206         DRF_NUM(_TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3207     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1);
3208 
3209     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER,
3210         DRF_NUM(_TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3211     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT, 1);
3212 
3213     // Set sourcetrack ECC error limits to _PROD value
3214     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
3215         DRF_NUM(_SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3216     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1);
3217 
3218     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
3219         DRF_NUM(_SOURCETRACK, _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3220     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1);
3221 
3222     // Enable ECC/parity
3223     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_ECC_CTRL,
3224         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _NCISOC_HDR_ECC_ENABLE, __PROD) |
3225         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, __PROD) |
3226         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _REMAPTAB_ECC_ENABLE, __PROD) |
3227         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _RIDTAB_ECC_ENABLE, __PROD) |
3228         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _RLANTAB_ECC_ENABLE, __PROD));
3229 
3230     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_ECC_CTRL,
3231         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NXBAR_ECC_ENABLE, __PROD) |
3232         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NXBAR_PARITY_ENABLE, __PROD) |
3233         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _RAM_OUT_ECC_ENABLE, __PROD) |
3234         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NCISOC_ECC_ENABLE, __PROD) |
3235         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, __PROD));
3236 
3237     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_ECC_CTRL,
3238         DRF_DEF(_ROUTE, _ERR_ECC_CTRL, _GLT_ECC_ENABLE, __PROD) |
3239         DRF_DEF(_ROUTE, _ERR_ECC_CTRL, _NVS_ECC_ENABLE, __PROD));
3240 
3241     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_ECC_CTRL,
3242         DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _CRUMBSTORE_ECC_ENABLE, __PROD) |
3243         DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _TAGPOOL_ECC_ENABLE, __PROD) |
3244         DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _TD_TID_ECC_ENABLE, _DISABLE));
3245 
3246     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_ECC_CTRL,
3247         DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN0_CRUMBSTORE_ECC_ENABLE, __PROD) |
3248         DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN0_TD_CRUMBSTORE_ECC_ENABLE, _DISABLE) |
3249         DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN1_CRUMBSTORE_ECC_ENABLE, __PROD));
3250 }
3251 
3252 static void
3253 _nvswitch_init_cmd_routing
3254 (
3255     nvswitch_device *device
3256 )
3257 {
3258     NvU32 val;
3259 
3260     //Set Hash policy for the requests.
3261     val = DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN1, _SPRAY) |
3262           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN2, _SPRAY) |
3263           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN4, _SPRAY) |
3264           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN7, _SPRAY);
3265     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _CMD_ROUTE_TABLE0, val);
3266 
3267     // Set Random policy for reponses.
3268     val = DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE2, _RFUN16, _RANDOM) |
3269           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE2, _RFUN17, _RANDOM);
3270     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _CMD_ROUTE_TABLE2, val);
3271 }
3272 
3273 static NvlStatus
3274 _nvswitch_init_portstat_counters
3275 (
3276     nvswitch_device *device
3277 )
3278 {
3279     NvlStatus retval;
3280     NvU32 idx_channel;
3281     NVSWITCH_SET_LATENCY_BINS default_latency_bins;
3282     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
3283 
3284     chip_device->latency_stats = nvswitch_os_malloc(sizeof(NVSWITCH_LATENCY_STATS_LR10));
3285     if (chip_device->latency_stats == NULL)
3286     {
3287         NVSWITCH_PRINT(device, ERROR, "%s: Failed allocate memory for latency stats\n",
3288             __FUNCTION__);
3289         return -NVL_NO_MEM;
3290     }
3291 
3292     nvswitch_os_memset(chip_device->latency_stats, 0, sizeof(NVSWITCH_LATENCY_STATS_LR10));
3293 
3294     //
3295     // These bin thresholds are values provided by Arch based off
3296     // switch latency expectations.
3297     //
3298     for (idx_channel=0; idx_channel < NVSWITCH_NUM_VCS_LR10; idx_channel++)
3299     {
3300         default_latency_bins.bin[idx_channel].lowThreshold = 120;    // 120ns
3301         default_latency_bins.bin[idx_channel].medThreshold = 200;    // 200ns
3302         default_latency_bins.bin[idx_channel].hiThreshold  = 1000;   // 1us
3303     }
3304 
3305     chip_device->latency_stats->sample_interval_msec = 3000; // 3 second sample interval
3306 
3307     retval = nvswitch_ctrl_set_latency_bins(device, &default_latency_bins);
3308     if (retval != NVL_SUCCESS)
3309     {
3310         NVSWITCH_PRINT(device, ERROR, "%s: Failed to set latency bins\n",
3311             __FUNCTION__);
3312         NVSWITCH_ASSERT(0);
3313         return retval;
3314     }
3315 
3316     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_CONTROL,
3317         DRF_DEF(_NPORT, _PORTSTAT_CONTROL, _SWEEPMODE, _SWONDEMAND) |
3318         DRF_DEF(_NPORT, _PORTSTAT_CONTROL, _RANGESELECT, _BITS13TO0));
3319 
3320      NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SOURCE_FILTER_0,
3321          DRF_NUM(_NPORT, _PORTSTAT_SOURCE_FILTER_0, _SRCFILTERBIT, 0xFFFFFFFF));
3322 
3323     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SOURCE_FILTER_1,
3324         DRF_NUM(_NPORT, _PORTSTAT_SOURCE_FILTER_1, _SRCFILTERBIT, 0xF));
3325 
3326     // Set window limit to the maximum value
3327     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_WINDOW_LIMIT, 0xffffffff);
3328 
3329      NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _GLBLLATENCYTIMERCTRL,
3330          DRF_DEF(_NVLSAW, _GLBLLATENCYTIMERCTRL, _ENABLE, _ENABLE));
3331 
3332      NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL,
3333          DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
3334          DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE));
3335 
3336      return NVL_SUCCESS;
3337 }
3338 
3339 NvlStatus
3340 nvswitch_init_nxbar_lr10
3341 (
3342     nvswitch_device *device
3343 )
3344 {
3345     NvU32 tileout;
3346 
3347     // Setting this bit will send error detection info to NPG.
3348     NVSWITCH_BCAST_WR32_LR10(device, TILE, _NXBAR, _TILE_ERR_CYA,
3349         DRF_DEF(_NXBAR, _TILE_ERR_CYA, _SRCID_UPDATE_AT_EGRESS_CTRL, __PROD));
3350 
3351     for (tileout = 0; tileout < NUM_NXBAR_TILEOUTS_PER_TC_LR10; tileout++)
3352     {
3353         NVSWITCH_BCAST_WR32_LR10(device, NXBAR, _NXBAR, _TC_TILEOUT_ERR_CYA(tileout),
3354             DRF_DEF(_NXBAR, _TC_TILEOUT0_ERR_CYA, _SRCID_UPDATE_AT_EGRESS_CTRL, __PROD));
3355     }
3356 
3357     // Enable idle-based clk gating and setup delay count.
3358     NVSWITCH_BCAST_WR32_LR10(device, TILE, _NXBAR, _TILE_PRI_NXBAR_TILE_CG,
3359         DRF_DEF(_NXBAR, _TILE_PRI_NXBAR_TILE_CG, _IDLE_CG_EN, __PROD) |
3360         DRF_DEF(_NXBAR, _TILE_PRI_NXBAR_TILE_CG, _IDLE_CG_DLY_CNT, __PROD));
3361 
3362     NVSWITCH_BCAST_WR32_LR10(device, NXBAR, _NXBAR, _TC_PRI_NXBAR_TC_CG,
3363         DRF_DEF(_NXBAR, _TC_PRI_NXBAR_TC_CG, _IDLE_CG_EN, __PROD) |
3364         DRF_DEF(_NXBAR, _TC_PRI_NXBAR_TC_CG, _IDLE_CG_DLY_CNT, __PROD));
3365 
3366     return NVL_SUCCESS;
3367 }
3368 
//
// Initialize all NPORTs: ECC controls, route/egress controls, ATO/STO
// timeouts (regkey override or __PROD default), and two WARs.
// Per-field configuration is read from the first valid NPORT, modified, and
// written back to every NPORT via multicast broadcast writes.
//
// @param[in] device    a reference to the device being initialized
//
// @returns NVL_SUCCESS, or -NVL_ERR_INVALID_STATE if no valid NPORT exists
//
NvlStatus
nvswitch_init_nport_lr10
(
    nvswitch_device *device
)
{
    NvU32 data32, timeout;
    NvU32 idx_nport;
    NvU32 num_nports;

    num_nports = NVSWITCH_ENG_COUNT(device, NPORT, );

    for (idx_nport = 0; idx_nport < num_nports; idx_nport++)
    {
        // Find the first valid nport
        if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport))
        {
            break;
        }
    }

    // There were no valid nports
    if (idx_nport == num_nports)
    {
        NVSWITCH_PRINT(device, ERROR, "%s: No valid nports found!\n", __FUNCTION__);
        return -NVL_ERR_INVALID_STATE;
    }

    _nvswitch_init_nport_ecc_control_lr10(device);

    // Set _ROUTE_CONTROL._URRESPENB to __PROD on all ports (read-modify-write).
    data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _ROUTE, _ROUTE_CONTROL);
    data32 = FLD_SET_DRF(_ROUTE, _ROUTE_CONTROL, _URRESPENB, __PROD, data32);
    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _ROUTE_CONTROL, data32);

    // Set egress destination-ID checking and CTO enable to __PROD on all ports.
    data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _EGRESS, _CTRL);
    data32 = FLD_SET_DRF(_EGRESS, _CTRL, _DESTINATIONIDCHECKENB, __PROD, data32);
    data32 = FLD_SET_DRF(_EGRESS, _CTRL, _CTO_ENB, __PROD, data32);
    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _CTRL, data32);

    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _CTO_TIMER_LIMIT,
        DRF_DEF(_EGRESS, _CTO_TIMER_LIMIT, _LIMIT, __PROD));

    // ATO (tag-state timeout): regkey can disable it or override the timeout.
    if (DRF_VAL(_SWITCH_REGKEY, _ATO_CONTROL, _DISABLE, device->regkeys.ato_control) ==
        NV_SWITCH_REGKEY_ATO_CONTROL_DISABLE_TRUE)
    {
        // ATO Disable
        data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _TAGSTATECONTROL);
        data32 = FLD_SET_DRF(_TSTATE, _TAGSTATECONTROL, _ATO_ENB, _OFF, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _TAGSTATECONTROL, data32);
    }
    else
    {
        // ATO Enable
        data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _TAGSTATECONTROL);
        data32 = FLD_SET_DRF(_TSTATE, _TAGSTATECONTROL, _ATO_ENB, _ON, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _TAGSTATECONTROL, data32);

        // ATO Timeout value: regkey override if set, otherwise __PROD default.
        timeout = DRF_VAL(_SWITCH_REGKEY, _ATO_CONTROL, _TIMEOUT, device->regkeys.ato_control);
        if (timeout != NV_SWITCH_REGKEY_ATO_CONTROL_TIMEOUT_DEFAULT)
        {
            NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ATO_TIMER_LIMIT,
                DRF_NUM(_TSTATE, _ATO_TIMER_LIMIT, _LIMIT, timeout));
        }
        else
        {
            NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ATO_TIMER_LIMIT,
                DRF_DEF(_TSTATE, _ATO_TIMER_LIMIT, _LIMIT, __PROD));
        }
    }

    // STO (source-track timeout): same regkey disable/override pattern as ATO.
    if (DRF_VAL(_SWITCH_REGKEY, _STO_CONTROL, _DISABLE, device->regkeys.sto_control) ==
        NV_SWITCH_REGKEY_STO_CONTROL_DISABLE_TRUE)
    {
        // STO Disable
        data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _SOURCETRACK, _CTRL);
        data32 = FLD_SET_DRF(_SOURCETRACK, _CTRL, _STO_ENB, _OFF, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _CTRL, data32);
    }
    else
    {
        // STO Enable
        data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _SOURCETRACK, _CTRL);
        data32 = FLD_SET_DRF(_SOURCETRACK, _CTRL, _STO_ENB, _ON, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _CTRL, data32);

        // STO Timeout value: regkey override if set, otherwise __PROD default.
        timeout = DRF_VAL(_SWITCH_REGKEY, _STO_CONTROL, _TIMEOUT, device->regkeys.sto_control);
        if (timeout != NV_SWITCH_REGKEY_STO_CONTROL_TIMEOUT_DEFAULT)
        {
            NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _MULTISEC_TIMER0,
                DRF_NUM(_SOURCETRACK, _MULTISEC_TIMER0, _TIMERVAL0, timeout));
        }
        else
        {
            NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _MULTISEC_TIMER0,
                DRF_DEF(_SOURCETRACK, _MULTISEC_TIMER0, _TIMERVAL0, __PROD));
        }
    }

    //
    // WAR for bug 200606509
    // Disable CAM for entry 0 to prevent false ATO trigger
    //
    // NOTE(review): the value read here is discarded — data32 is fully
    // overwritten by the DRF_NUM on the next line, so this is not a
    // read-modify-write like the sequences above. Confirm whether the read
    // is intentional (e.g. register access ordering) or a leftover.
    //
    data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _CREQ_CAM_LOCK);
    data32 = DRF_NUM(_TSTATE, _CREQ_CAM_LOCK, _ON, 0x1);
    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _CREQ_CAM_LOCK, data32);

    //
    // WAR for bug 3115824
    // Clear CONTAIN_AND_DRAIN during init for links in reset.
    // Since SBR does not clear CONTAIN_AND_DRAIN, this will clear the bit
    // when the driver is reloaded after an SBR. If the driver has been reloaded
    // without an SBR, then CONTAIN_AND_DRAIN will be re-triggered.
    //
    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _CONTAIN_AND_DRAIN,
        DRF_DEF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE));

    return NVL_SUCCESS;
}
3489 
3490 void *
3491 nvswitch_alloc_chipdevice_lr10
3492 (
3493     nvswitch_device *device
3494 )
3495 {
3496     void *chip_device;
3497 
3498     chip_device = nvswitch_os_malloc(sizeof(lr10_device));
3499     if (NULL != chip_device)
3500     {
3501         nvswitch_os_memset(chip_device, 0, sizeof(lr10_device));
3502     }
3503 
3504     device->chip_id = NV_PSMC_BOOT_42_CHIP_ID_LR10;
3505     return(chip_device);
3506 }
3507 
3508 static NvlStatus
3509 nvswitch_initialize_pmgr_lr10
3510 (
3511     nvswitch_device *device
3512 )
3513 {
3514     nvswitch_init_pmgr_lr10(device);
3515     nvswitch_init_pmgr_devices_lr10(device);
3516 
3517     return NVL_SUCCESS;
3518 }
3519 
3520 static NvlStatus
3521 nvswitch_initialize_route_lr10
3522 (
3523     nvswitch_device *device
3524 )
3525 {
3526     NvlStatus retval;
3527 
3528     retval = _nvswitch_init_ganged_link_routing(device);
3529     if (NVL_SUCCESS != retval)
3530     {
3531         NVSWITCH_PRINT(device, ERROR,
3532             "%s: Failed to initialize GLT\n",
3533             __FUNCTION__);
3534         goto nvswitch_initialize_route_exit;
3535     }
3536 
3537     _nvswitch_init_cmd_routing(device);
3538 
3539     // Initialize Portstat Counters
3540     retval = _nvswitch_init_portstat_counters(device);
3541     if (NVL_SUCCESS != retval)
3542     {
3543         NVSWITCH_PRINT(device, ERROR,
3544             "%s: Failed to initialize portstat counters\n",
3545             __FUNCTION__);
3546         goto nvswitch_initialize_route_exit;
3547     }
3548 
3549 nvswitch_initialize_route_exit:
3550     return retval;
3551 }
3552 
3553 
//
// Enumerate and start the PRIV ring, retrying up to 3 times (see Bug 1826216
// below). A retry is triggered by a command timeout, a failed connectivity
// result, or any pending ring interrupt status.
//
// @param[in] device    a reference to the device being initialized
//
// @returns NVL_SUCCESS, or -NVL_INITIALIZATION_TOTAL_FAILURE if the ring
//          could not be enumerated after all retries.
//
NvlStatus
nvswitch_pri_ring_init_lr10
(
    nvswitch_device *device
)
{
    NvU32 i;
    NvU32 value;
    NvBool enumerated = NV_FALSE;
    NvlStatus retval = NVL_SUCCESS;

    //
    // Sometimes on RTL simulation we see the priv ring initialization fail.
    // Retry up to 3 times until this issue is root caused. Bug 1826216.
    //
    for (i = 0; !enumerated && (i < 3); i++)
    {
        value = DRF_DEF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _ENUMERATE_AND_START_RING);
        retval = nvswitch_ring_master_cmd_lr10(device, value);
        if (retval != NVL_SUCCESS)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: PRIV ring enumeration failed\n",
                __FUNCTION__);
            continue;
        }

        // The start results must report connectivity PASS for this attempt
        // to count.
        value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_START_RESULTS);
        if (!FLD_TEST_DRF(_PPRIV_MASTER, _RING_START_RESULTS, _CONNECTIVITY, _PASS, value))
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: PRIV ring connectivity failed\n",
                __FUNCTION__);
            continue;
        }

        // Any nonzero interrupt status fails the attempt; log known fault
        // bits, acknowledge the interrupt, and retry.
        value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_INTERRUPT_STATUS0);
        if (value)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0 = %x\n",
                __FUNCTION__, value);

            if ((!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
                    _RING_START_CONN_FAULT, 0, value)) ||
                (!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
                    _DISCONNECT_FAULT, 0, value))      ||
                (!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
                    _OVERFLOW_FAULT, 0, value)))
            {
                NVSWITCH_PRINT(device, ERROR,
                    "%s: PRIV ring error interrupt\n",
                    __FUNCTION__);
            }

            // Ack failure is ignored; the attempt is already being retried.
            (void)nvswitch_ring_master_cmd_lr10(device,
                    DRF_DEF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _ACK_INTERRUPT));

            continue;
        }

        enumerated = NV_TRUE;
    }

    if (!enumerated)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Cannot enumerate PRIV ring!\n",
            __FUNCTION__);
        retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
    }

    return retval;
}
3628 
3629 /*
3630  * @Brief : Initializes an NvSwitch hardware state
3631  *
3632  * @Description :
3633  *
3634  * @param[in] device        a reference to the device to initialize
3635  *
3636  * @returns                 NVL_SUCCESS if the action succeeded
3637  *                          -NVL_BAD_ARGS if bad arguments provided
3638  *                          -NVL_PCI_ERROR if bar info unable to be retrieved
3639  */
3640 NvlStatus
3641 nvswitch_initialize_device_state_lr10
3642 (
3643     nvswitch_device *device
3644 )
3645 {
3646     NvlStatus retval = NVL_SUCCESS;
3647 
3648     // alloc chip-specific device structure
3649     device->chip_device = nvswitch_alloc_chipdevice(device);
3650     if (NULL == device->chip_device)
3651     {
3652         NVSWITCH_PRINT(device, ERROR,
3653             "nvswitch_os_malloc during chip_device creation failed!\n");
3654         retval = -NVL_NO_MEM;
3655         goto nvswitch_initialize_device_state_exit;
3656     }
3657 
3658     NVSWITCH_PRINT(device, SETUP,
3659         "%s: MMIO discovery\n",
3660         __FUNCTION__);
3661     retval = nvswitch_device_discovery(device, NV_SWPTOP_TABLE_BASE_ADDRESS_OFFSET);
3662     if (NVL_SUCCESS != retval)
3663     {
3664         NVSWITCH_PRINT(device, ERROR,
3665             "%s: Engine discovery failed\n",
3666             __FUNCTION__);
3667         goto nvswitch_initialize_device_state_exit;
3668     }
3669 
3670     nvswitch_filter_discovery(device);
3671 
3672     retval = nvswitch_process_discovery(device);
3673     if (NVL_SUCCESS != retval)
3674     {
3675         NVSWITCH_PRINT(device, ERROR,
3676             "%s: Discovery processing failed\n",
3677             __FUNCTION__);
3678         goto nvswitch_initialize_device_state_exit;
3679     }
3680 
3681     // now that we have completed discovery, perform initialization steps that
3682     // depend on engineDescriptors being initialized
3683     //
3684     // Temporary location, really needs to be done somewhere common to all flcnables
3685     if (nvswitch_is_soe_supported(device))
3686     {
3687         flcnablePostDiscoveryInit(device, device->pSoe);
3688     }
3689     else
3690     {
3691         NVSWITCH_PRINT(device, INFO, "%s: Skipping SOE post discovery init.\n",
3692             __FUNCTION__);
3693     }
3694 
3695     // Make sure interrupts are disabled before we enable interrupts with the OS.
3696     nvswitch_lib_disable_interrupts(device);
3697 
3698     retval = nvswitch_pri_ring_init(device);
3699     if (retval != NVL_SUCCESS)
3700     {
3701         NVSWITCH_PRINT(device, ERROR, "%s: PRI init failed\n", __FUNCTION__);
3702         goto nvswitch_initialize_device_state_exit;
3703     }
3704 
3705     NVSWITCH_PRINT(device, SETUP,
3706         "%s: Enabled links: 0x%llx\n",
3707         __FUNCTION__,
3708         ((NvU64)device->regkeys.link_enable_mask2 << 32 |
3709         (NvU64)device->regkeys.link_enable_mask) &
3710         ((~0ULL) >> (64 - NVSWITCH_LINK_COUNT(device))));
3711 
3712     if (nvswitch_is_soe_supported(device))
3713     {
3714         retval = nvswitch_init_soe(device);
3715         if (NVL_SUCCESS != retval)
3716         {
3717             NVSWITCH_PRINT(device, ERROR, "%s: Init SOE failed\n",
3718                 __FUNCTION__);
3719             goto nvswitch_initialize_device_state_exit;
3720         }
3721     }
3722     else
3723     {
3724         NVSWITCH_PRINT(device, INFO, "%s: Skipping SOE init.\n",
3725             __FUNCTION__);
3726     }
3727 
3728     // Read ROM configuration
3729     nvswitch_read_rom_tables(device, &device->firmware);
3730     _nvswitch_process_firmware_info_lr10(device, &device->firmware);
3731 
3732     // Init PMGR info
3733     retval = nvswitch_initialize_pmgr(device);
3734     if (retval != NVL_SUCCESS)
3735     {
3736         NVSWITCH_PRINT(device, ERROR,
3737             "%s: PMGR init failed\n", __FUNCTION__);
3738         retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
3739         goto nvswitch_initialize_device_state_exit;
3740     }
3741 
3742     retval = nvswitch_init_pll_config(device);
3743     if (retval != NVL_SUCCESS)
3744     {
3745         NVSWITCH_PRINT(device, ERROR,
3746             "%s: failed\n", __FUNCTION__);
3747         retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
3748         goto nvswitch_initialize_device_state_exit;
3749     }
3750 
3751     //
3752     // PLL init should be done *first* before other hardware init
3753     //
3754     retval = nvswitch_init_pll(device);
3755     if (NVL_SUCCESS != retval)
3756     {
3757         NVSWITCH_PRINT(device, ERROR,
3758             "%s: PLL init failed\n",
3759             __FUNCTION__);
3760         goto nvswitch_initialize_device_state_exit;
3761     }
3762 
3763     //
3764     // Now that software knows the devices and addresses, it must take all
3765     // the wrapper modules out of reset.  It does this by writing to the
3766     // PMC module enable registers.
3767     //
3768 
3769     // Init IP wrappers
3770 //    _nvswitch_init_mc_enable_lr10(device);
3771     retval = nvswitch_initialize_ip_wrappers(device);
3772     if (retval != NVL_SUCCESS)
3773     {
3774         NVSWITCH_PRINT(device, ERROR,
3775             "%s: init failed\n", __FUNCTION__);
3776         retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
3777         goto nvswitch_initialize_device_state_exit;
3778     }
3779 
3780     nvswitch_init_warm_reset(device);
3781     nvswitch_init_npg_multicast(device);
3782     retval = nvswitch_clear_nport_rams(device);
3783     if (NVL_SUCCESS != retval)
3784     {
3785         NVSWITCH_PRINT(device, ERROR,
3786             "%s: NPORT RAM clear failed\n",
3787             __FUNCTION__);
3788         goto nvswitch_initialize_device_state_exit;
3789     }
3790 
3791     retval = nvswitch_init_nport(device);
3792     if (retval != NVL_SUCCESS)
3793     {
3794         NVSWITCH_PRINT(device, ERROR,
3795             "%s: Init NPORTs failed\n",
3796             __FUNCTION__);
3797         goto nvswitch_initialize_device_state_exit;
3798     }
3799 
3800     retval = nvswitch_init_nxbar(device);
3801     if (retval != NVL_SUCCESS)
3802     {
3803         NVSWITCH_PRINT(device, ERROR,
3804             "%s: Init NXBARs failed\n",
3805             __FUNCTION__);
3806         goto nvswitch_initialize_device_state_exit;
3807     }
3808 
3809     if (device->regkeys.minion_disable != NV_SWITCH_REGKEY_MINION_DISABLE_YES)
3810     {
3811         NVSWITCH_PRINT(device, WARN, "%s: Entering init minion\n", __FUNCTION__);
3812 
3813         retval = nvswitch_init_minion(device);
3814         if (NVL_SUCCESS != retval)
3815         {
3816             NVSWITCH_PRINT(device, ERROR,
3817                 "%s: Init MINIONs failed\n",
3818                 __FUNCTION__);
3819             goto nvswitch_initialize_device_state_exit;
3820         }
3821     }
3822     else
3823     {
3824         NVSWITCH_PRINT(device, INFO, "MINION is disabled via regkey.\n");
3825 
3826         NVSWITCH_PRINT(device, INFO, "%s: Skipping MINION init\n",
3827             __FUNCTION__);
3828     }
3829 
3830     _nvswitch_setup_chiplib_forced_config_lr10(device);
3831 
3832     // Init route
3833     retval = nvswitch_initialize_route(device);
3834     if (retval != NVL_SUCCESS)
3835     {
3836         NVSWITCH_PRINT(device, ERROR,
3837             "%s: route init failed\n", __FUNCTION__);
3838         retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
3839         goto nvswitch_initialize_device_state_exit;
3840     }
3841 
3842     nvswitch_init_clock_gating(device);
3843 
3844     // Initialize SPI
3845     if (nvswitch_is_spi_supported(device))
3846     {
3847         retval = nvswitch_spi_init(device);
3848         if (NVL_SUCCESS != retval)
3849         {
3850             NVSWITCH_PRINT(device, ERROR,
3851                 "%s: SPI init failed!, rc: %d\n",
3852                 __FUNCTION__, retval);
3853             goto nvswitch_initialize_device_state_exit;
3854         }
3855     }
3856     else
3857     {
3858         NVSWITCH_PRINT(device, ERROR,
3859             "%s: Skipping SPI init.\n",
3860             __FUNCTION__);
3861     }
3862 
3863     // Initialize SMBPBI
3864     if (nvswitch_is_smbpbi_supported(device))
3865     {
3866         retval = nvswitch_smbpbi_init(device);
3867         if (NVL_SUCCESS != retval)
3868         {
3869             NVSWITCH_PRINT(device, ERROR,
3870                 "%s: SMBPBI init failed!, rc: %d\n",
3871                 __FUNCTION__, retval);
3872             goto nvswitch_initialize_device_state_exit;
3873         }
3874     }
3875     else
3876     {
3877         NVSWITCH_PRINT(device, ERROR,
3878             "%s: Skipping SMBPBI init.\n",
3879             __FUNCTION__);
3880     }
3881 
3882     nvswitch_initialize_interrupt_tree(device);
3883 
3884     // Initialize external thermal sensor
3885     retval = nvswitch_init_thermal(device);
3886     if (NVL_SUCCESS != retval)
3887     {
3888         NVSWITCH_PRINT(device, ERROR,
3889             "%s: External Thermal init failed\n",
3890             __FUNCTION__);
3891     }
3892 
3893     return NVL_SUCCESS;
3894 
3895 nvswitch_initialize_device_state_exit:
3896     nvswitch_destroy_device_state(device);
3897 
3898     return retval;
3899 }
3900 
3901 /*
3902  * @Brief : Destroys an NvSwitch hardware state
3903  *
3904  * @Description :
3905  *
3906  * @param[in] device        a reference to the device to initialize
3907  */
3908 void
3909 nvswitch_destroy_device_state_lr10
3910 (
3911     nvswitch_device *device
3912 )
3913 {
3914     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
3915 
3916     if (nvswitch_is_soe_supported(device))
3917     {
3918         nvswitch_soe_unregister_events(device);
3919     }
3920 
3921     if (chip_device != NULL)
3922     {
3923         if ((chip_device->latency_stats) != NULL)
3924         {
3925             nvswitch_os_free(chip_device->latency_stats);
3926         }
3927 
3928         if ((chip_device->ganged_link_table) != NULL)
3929         {
3930             nvswitch_os_free(chip_device->ganged_link_table);
3931         }
3932 
3933         nvswitch_free_chipdevice(device);
3934     }
3935 
3936     nvswitch_i2c_destroy(device);
3937 
3938     return;
3939 }
3940 
3941 static void
3942 _nvswitch_set_nvlink_caps_lr10
3943 (
3944     NvU32 *pCaps
3945 )
3946 {
3947     NvU8 tempCaps[NVSWITCH_NVLINK_CAPS_TBL_SIZE];
3948 
3949     nvswitch_os_memset(tempCaps, 0, sizeof(tempCaps));
3950 
3951     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _VALID);
3952     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SUPPORTED);
3953     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _P2P_SUPPORTED);
3954     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _P2P_ATOMICS);
3955 
3956     // Assume IBM P9 for PPC -- TODO Xavier support.
3957 #if defined(NVCPU_PPC64LE)
3958     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SYSMEM_ACCESS);
3959     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SYSMEM_ATOMICS);
3960 #endif
3961 
3962     nvswitch_os_memcpy(pCaps, tempCaps, sizeof(tempCaps));
3963 }
3964 
3965 /*
3966  * @brief Determines if a link's lanes are reversed
3967  *
3968  * @param[in] device    a reference to the device to query
3969  * @param[in] linkId    Target link ID
3970  *
3971  * @return NV_TRUE if a link's lanes are reversed
3972  */
3973 NvBool
3974 nvswitch_link_lane_reversed_lr10
3975 (
3976     nvswitch_device *device,
3977     NvU32            linkId
3978 )
3979 {
3980     NvU32 regData;
3981     nvlink_link *link;
3982 
3983     link = nvswitch_get_link(device, linkId);
3984     if (nvswitch_is_link_in_reset(device, link))
3985     {
3986         return NV_FALSE;
3987     }
3988 
3989     regData = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_RX, _CONFIG_RX);
3990 
3991     // HW may reverse the lane ordering or it may be overridden by SW.
3992     if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _REVERSAL_OVERRIDE, _ON, regData))
3993     {
3994         // Overridden
3995         if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _LANE_REVERSE, _ON, regData))
3996         {
3997             return NV_TRUE;
3998         }
3999         else
4000         {
4001             return NV_FALSE;
4002         }
4003     }
4004     else
4005     {
4006         // Sensed in HW
4007         if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _HW_LANE_REVERSE, _ON, regData))
4008         {
4009             return NV_TRUE;
4010         }
4011         else
4012         {
4013             return NV_FALSE;
4014         }
4015     }
4016 
4017     return NV_FALSE;
4018 }
4019 
//
// Populate NVSWITCH_GET_NVLINK_STATUS_PARAMS for every enabled link:
// caps, link/sublink state, lane reversal, remote connection identity,
// NVLink/NCI versions, local PCI identity and clock rates.
//
NvlStatus
nvswitch_ctrl_get_nvlink_status_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_NVLINK_STATUS_PARAMS *ret
)
{
    NvlStatus retval = NVL_SUCCESS;
    nvlink_link *link;
    NvU8 i;
    NvU32 linkState, txSublinkStatus, rxSublinkStatus;
    nvlink_conn_info conn_info = {0};
    NvU64 enabledLinkMask;
    NvU32 nvlink_caps_version;

    enabledLinkMask = nvswitch_get_enabled_link_mask(device);
    ret->enabledLinkMask = enabledLinkMask;

    FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask)
    {
        NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device));

        link = nvswitch_get_link(device, i);

        // Skip links with no DL engine or out-of-range indices.
        if ((link == NULL) ||
            !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
            (i >= NVSWITCH_NVLINK_MAX_LINKS))
        {
            continue;
        }

        //
        // Call the core library to get the remote end information. On the first
        // invocation this will also trigger link training, if link-training is
        // not externally managed by FM. Therefore it is necessary that this be
        // before link status on the link is populated since this call will
        // actually change link state.
        //
        if (device->regkeys.external_fabric_mgmt)
        {
            nvlink_lib_get_remote_conn_info(link, &conn_info);
        }
        else
        {
            nvlink_lib_discover_and_get_remote_conn_info(link, &conn_info, NVLINK_STATE_CHANGE_SYNC);
        }

        // Set NVLINK per-link caps
        _nvswitch_set_nvlink_caps_lr10(&ret->linkInfo[i].capsTbl);

        ret->linkInfo[i].phyType = NVSWITCH_NVLINK_STATUS_PHY_NVHS;
        ret->linkInfo[i].subLinkWidth = nvswitch_get_sublink_width(device, link->linkNumber);

        if (!nvswitch_is_link_in_reset(device, link))
        {
            // Link is out of reset: read live DL state and sublink SLSM states.
            linkState = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_STATE);
            linkState = DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, linkState);

            txSublinkStatus = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _SLSM_STATUS_TX);
            txSublinkStatus = DRF_VAL(_NVLDL_TX, _SLSM_STATUS_TX, _PRIMARY_STATE, txSublinkStatus);

            rxSublinkStatus = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _SLSM_STATUS_RX);
            rxSublinkStatus = DRF_VAL(_NVLDL_RX, _SLSM_STATUS_RX, _PRIMARY_STATE, rxSublinkStatus);

            ret->linkInfo[i].bLaneReversal = nvswitch_link_lane_reversed_lr10(device, i);
        }
        else
        {
            // Link still in reset: report INIT/OFF rather than touching DL registers.
            linkState       = NVSWITCH_NVLINK_STATUS_LINK_STATE_INIT;
            txSublinkStatus = NVSWITCH_NVLINK_STATUS_SUBLINK_TX_STATE_OFF;
            rxSublinkStatus = NVSWITCH_NVLINK_STATUS_SUBLINK_RX_STATE_OFF;
        }

        ret->linkInfo[i].linkState       = linkState;
        ret->linkInfo[i].txSublinkStatus = txSublinkStatus;
        ret->linkInfo[i].rxSublinkStatus = rxSublinkStatus;

        // Map the device caps version onto the reported NVLink/NCI versions.
        nvlink_caps_version = nvswitch_get_caps_nvlink_version(device);
        if (nvlink_caps_version == NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0)
        {
            ret->linkInfo[i].nvlinkVersion = NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_3_0;
            ret->linkInfo[i].nciVersion = NVSWITCH_NVLINK_STATUS_NCI_VERSION_3_0;
        }
        else if (nvlink_caps_version == NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_4_0)
        {
            ret->linkInfo[i].nvlinkVersion = NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_4_0;
            ret->linkInfo[i].nciVersion = NVSWITCH_NVLINK_STATUS_NCI_VERSION_4_0;
        }
        else
        {
            NVSWITCH_PRINT(device, WARN,
                "%s WARNING: Unknown NVSWITCH_NVLINK_CAPS_NVLINK_VERSION 0x%x\n",
                __FUNCTION__, nvlink_caps_version);
            ret->linkInfo[i].nvlinkVersion = NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_INVALID;
            ret->linkInfo[i].nciVersion = NVSWITCH_NVLINK_STATUS_NCI_VERSION_INVALID;
        }

        ret->linkInfo[i].phyVersion = NVSWITCH_NVLINK_STATUS_NVHS_VERSION_1_0;

        if (conn_info.bConnected)
        {
            // Remote endpoint discovered: report its PCI identity and SIDs.
            ret->linkInfo[i].connected = NVSWITCH_NVLINK_STATUS_CONNECTED_TRUE;
            ret->linkInfo[i].remoteDeviceLinkNumber = (NvU8)conn_info.linkNumber;

            ret->linkInfo[i].remoteDeviceInfo.domain = conn_info.domain;
            ret->linkInfo[i].remoteDeviceInfo.bus = conn_info.bus;
            ret->linkInfo[i].remoteDeviceInfo.device = conn_info.device;
            ret->linkInfo[i].remoteDeviceInfo.function = conn_info.function;
            ret->linkInfo[i].remoteDeviceInfo.pciDeviceId = conn_info.pciDeviceId;
            ret->linkInfo[i].remoteDeviceInfo.deviceType = conn_info.deviceType;

            ret->linkInfo[i].localLinkSid  = link->localSid;
            ret->linkInfo[i].remoteLinkSid = link->remoteSid;

            // A nonzero PCI device ID marks the remote identity as PCI-valid.
            if (0 != conn_info.pciDeviceId)
            {
                ret->linkInfo[i].remoteDeviceInfo.deviceIdFlags =
                    FLD_SET_DRF(SWITCH_NVLINK, _DEVICE_INFO, _DEVICE_ID_FLAGS,
                         _PCI, ret->linkInfo[i].remoteDeviceInfo.deviceIdFlags);
            }

            // Does not use loopback
            ret->linkInfo[i].loopProperty =
                NVSWITCH_NVLINK_STATUS_LOOP_PROPERTY_NONE;
        }
        else
        {
            ret->linkInfo[i].connected =
                NVSWITCH_NVLINK_STATUS_CONNECTED_FALSE;
            ret->linkInfo[i].remoteDeviceInfo.deviceType =
                NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_NONE;
        }

        // Set the device information for the local end of the link
        ret->linkInfo[i].localDeviceInfo.domain = device->nvlink_device->pciInfo.domain;
        ret->linkInfo[i].localDeviceInfo.bus = device->nvlink_device->pciInfo.bus;
        ret->linkInfo[i].localDeviceInfo.device = device->nvlink_device->pciInfo.device;
        ret->linkInfo[i].localDeviceInfo.function = device->nvlink_device->pciInfo.function;
        ret->linkInfo[i].localDeviceInfo.pciDeviceId = 0xdeadbeef; // TODO
        ret->linkInfo[i].localDeviceLinkNumber = i;
        ret->linkInfo[i].laneRxdetStatusMask = device->link[i].lane_rxdet_status_mask;
        ret->linkInfo[i].localDeviceInfo.deviceType =
            NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_SWITCH;

        // Clock data
        ret->linkInfo[i].nvlinkLineRateMbps = nvswitch_minion_get_line_rate_Mbps_lr10(device, i);
        ret->linkInfo[i].nvlinkLinkDataRateKiBps = nvswitch_minion_get_data_rate_KiBps_lr10(device, i);
        // Link clock derived from the line rate; divisor 32 per LR10 clocking.
        ret->linkInfo[i].nvlinkLinkClockMhz = ret->linkInfo[i].nvlinkLineRateMbps / 32;
        ret->linkInfo[i].nvlinkRefClkSpeedMhz = 156;
        ret->linkInfo[i].nvlinkRefClkType = NVSWITCH_NVLINK_REFCLK_TYPE_NVHS;

    }
    FOR_EACH_INDEX_IN_MASK_END;

//    NVSWITCH_ASSERT(ret->enabledLinkMask == enabledLinkMask);

    return retval;
}
4178 
//
// Gather NVLink throughput (TL) and DL error counters for a single link,
// selected by ret->counterMask. TL counters are read from NVLTLC registers;
// DL error counters come from MINION DLSTAT queries (reported as 0 when
// MINION is not initialized).
//
// @return NVL_SUCCESS, -NVL_BAD_ARGS on an invalid linkId, or a MINION
//         DLSTAT error status.
//
NvlStatus
nvswitch_ctrl_get_counters_lr10
(
    nvswitch_device *device,
    NVSWITCH_NVLINK_GET_COUNTERS_PARAMS *ret
)
{
    nvlink_link *link;
    NvU8   i;
    NvU32  counterMask;
    NvU32  data;
    NvU32  val;
    NvU64  tx0TlCount;
    NvU64  tx1TlCount;
    NvU64  rx0TlCount;
    NvU64  rx1TlCount;
    NvU32  laneId;
    NvBool bLaneReversed;
    NvlStatus status;
    NvBool minion_enabled;

    ct_assert(NVSWITCH_NUM_LANES_LR10 <= NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE__SIZE);

    link = nvswitch_get_link(device, ret->linkId);
    if ((link == NULL) ||
        !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber))
    {
        return -NVL_BAD_ARGS;
    }

    minion_enabled = nvswitch_is_minion_initialized(device, NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION));

    counterMask = ret->counterMask;

    // Common usage allows one of these to stand for all of them
    if (counterMask & (NVSWITCH_NVLINK_COUNTER_TL_TX0 |
                       NVSWITCH_NVLINK_COUNTER_TL_TX1 |
                       NVSWITCH_NVLINK_COUNTER_TL_RX0 |
                       NVSWITCH_NVLINK_COUNTER_TL_RX1))
    {
        // Each 64-bit TL counter uses bit 63 as a sticky overflow flag;
        // report it separately and mask it out of the returned count.
        tx0TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(0)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(0)));
        if (NVBIT64(63) & tx0TlCount)
        {
            ret->bTx0TlCounterOverflow = NV_TRUE;
            tx0TlCount &= ~(NVBIT64(63));
        }

        tx1TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(1)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(1)));
        if (NVBIT64(63) & tx1TlCount)
        {
            ret->bTx1TlCounterOverflow = NV_TRUE;
            tx1TlCount &= ~(NVBIT64(63));
        }

        rx0TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(0)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(0)));
        if (NVBIT64(63) & rx0TlCount)
        {
            ret->bRx0TlCounterOverflow = NV_TRUE;
            rx0TlCount &= ~(NVBIT64(63));
        }

        rx1TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(1)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(1)));
        if (NVBIT64(63) & rx1TlCount)
        {
            ret->bRx1TlCounterOverflow = NV_TRUE;
            rx1TlCount &= ~(NVBIT64(63));
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_TX0)] = tx0TlCount;
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_TX1)] = tx1TlCount;
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_RX0)] = rx0TlCount;
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_RX1)] = rx1TlCount;
    }

    // RX flit CRC error count, via MINION DLSTAT RX01.
    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_RX01, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _RX01, _FLIT_CRC_ERRORS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT)]
            = data;
    }

    data = 0x0;
    bLaneReversed = nvswitch_link_lane_reversed_lr10(device, link->linkNumber);

    // Per-lane CRC errors, all packed into the single DLSTAT DB01 word.
    // NOTE(review): DB01 is re-queried on every loop iteration even though
    // each read returns the same word — confirm whether this is intentional.
    for (laneId = 0; laneId < NVSWITCH_NUM_LANES_LR10; laneId++)
    {
        //
        // HW may reverse the lane ordering or it may be overridden by SW.
        // If so, invert the interpretation of the lane CRC errors.
        //
        i = (NvU8)((bLaneReversed) ? (NVSWITCH_NUM_LANES_LR10 - 1) - laneId : laneId);

        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_DB01, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(laneId))
        {
            val = BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(laneId));

            // Select the DB01 field for the (possibly reversed) physical lane.
            switch (i)
            {
                case 0:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L0, data);
                    break;
                case 1:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L1, data);
                    break;
                case 2:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L2, data);
                    break;
                case 3:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L3, data);
                    break;
            }
        }
    }

    // TX replay events, via MINION DLSTAT TX09.
    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_TX09, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _TX09, _REPLAY_EVENTS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY)]
            = data;
    }

    // Link recovery events, via MINION DLSTAT LNK1.
    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_LNK1, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _LNK1, _ERROR_COUNT1_RECOVERY_EVENTS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)]
            = data;
    }

    // RX replay events, via MINION DLSTAT RX00.
    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_REPLAY)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_RX00, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _RX00, _REPLAY_EVENTS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_REPLAY)]
            = data;
    }

    // PHY refresh pass/fail counters are not implemented on LR10; report 0.
    if (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS)
    {
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS)] = 0;
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL)
    {
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL)] = 0;
    }

    return NVL_SUCCESS;
}
4413 
4414 static void
4415 nvswitch_ctrl_clear_throughput_counters_lr10
4416 (
4417     nvswitch_device *device,
4418     nvlink_link     *link,
4419     NvU32            counterMask
4420 )
4421 {
4422     NvU32 data;
4423 
4424     // TX
4425     data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL);
4426     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_TX0)
4427     {
4428         data = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETTX0, 0x1, data);
4429     }
4430     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_TX1)
4431     {
4432         data = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETTX1, 0x1, data);
4433     }
4434     NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, data);
4435 
4436     // RX
4437     data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL);
4438     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_RX0)
4439     {
4440         data = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETRX0, 0x1, data);
4441     }
4442     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_RX1)
4443     {
4444         data = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETRX1, 0x1, data);
4445     }
4446     NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, data);
4447 }
4448 
//
// Clear the requested DL error counters for one link. If MINION is running,
// the clear is delegated to it; otherwise the NVLDL error-count control
// registers are written directly.
//
// @return NVL_SUCCESS (including when no relevant bits are set in
//         counterMask), or the MINION clear status.
//
static NvlStatus
nvswitch_ctrl_clear_dl_error_counters_lr10
(
    nvswitch_device *device,
    nvlink_link     *link,
    NvU32            counterMask
)
{
    NvU32           data;

    // Nothing to do unless at least one DL error counter bit is requested.
    if ((!counterMask) ||
        (!(counterMask & (NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS |
                          NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY |
                          NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY))))
    {
        NVSWITCH_PRINT(device, INFO,
            "%s: Link%d: No error count clear request, counterMask (0x%x). Returning!\n",
            __FUNCTION__, link->linkNumber, counterMask);
        return NVL_SUCCESS;
    }

    // With Minion initialized, send command to minion
    if (nvswitch_is_minion_initialized(device, NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION)))
    {
        return nvswitch_minion_clear_dl_error_counters_lr10(device, link->linkNumber);
    }

    // With Minion not-initialized, perform with the registers
    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT)
    {
        // Clear flit CRC counts and the associated error rates.
        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL);
        data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_FLIT_CRC, _CLEAR, data);
        data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_RATES, _CLEAR, data);
        NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL, data);
    }

    if (counterMask & (NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS))
    {
        // Clear per-lane CRC counts; ECC counts only when explicitly requested.
        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL);
        data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_LANE_CRC, _CLEAR, data);
        data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_RATES, _CLEAR, data);
        if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS)
        {
            data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_ECC_COUNTS, _CLEAR, data);
        }
        NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL, data);
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY)
    {
        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _ERROR_COUNT_CTRL);
        data = FLD_SET_DRF(_NVLDL_TX, _ERROR_COUNT_CTRL, _CLEAR_REPLAY, _CLEAR, data);
        NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _ERROR_COUNT_CTRL, data);
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)
    {
        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _ERROR_COUNT_CTRL);
        data = FLD_SET_DRF(_NVLDL_TOP, _ERROR_COUNT_CTRL, _CLEAR_RECOVERY, _CLEAR, data);
        NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _ERROR_COUNT_CTRL, data);
    }
    return NVL_SUCCESS;
}
4528 
4529 /*
4530  * CTRL_NVSWITCH_GET_INFO
4531  *
4532  * Query for miscellaneous information analogous to NV2080_CTRL_GPU_INFO
4533  * This provides a single API to query for multiple pieces of miscellaneous
4534  * information via a single call.
4535  *
4536  */
4537 
4538 static NvU32
4539 _nvswitch_get_info_chip_id
4540 (
4541     nvswitch_device *device
4542 )
4543 {
4544     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4545 
4546     return (DRF_VAL(_PSMC, _BOOT_42, _CHIP_ID, val));
4547 }
4548 
4549 static NvU32
4550 _nvswitch_get_info_revision_major
4551 (
4552     nvswitch_device *device
4553 )
4554 {
4555     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4556 
4557     return (DRF_VAL(_PSMC, _BOOT_42, _MAJOR_REVISION, val));
4558 }
4559 
4560 static NvU32
4561 _nvswitch_get_info_revision_minor
4562 (
4563     nvswitch_device *device
4564 )
4565 {
4566     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4567 
4568     return (DRF_VAL(_PSMC, _BOOT_42, _MINOR_REVISION, val));
4569 }
4570 
4571 static NvU32
4572 _nvswitch_get_info_revision_minor_ext
4573 (
4574     nvswitch_device *device
4575 )
4576 {
4577     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4578 
4579     return (DRF_VAL(_PSMC, _BOOT_42, _MINOR_EXTENDED_REVISION, val));
4580 }
4581 
4582 static NvU32
4583 _nvswitch_get_info_voltage
4584 (
4585     nvswitch_device *device
4586 )
4587 {
4588     NvU32 voltage = 0;
4589 
4590     return voltage;
4591 }
4592 
// The InfoROM NVL object is not supported on LR10.
static NvBool
_nvswitch_inforom_nvl_supported
(
    nvswitch_device *device
)
{
    return NV_FALSE;
}
4601 
// The InfoROM BBX (black-box recorder) object is not supported on LR10.
static NvBool
_nvswitch_inforom_bbx_supported
(
    nvswitch_device *device
)
{
    return NV_FALSE;
}
4610 
4611 /*
4612  * CTRL_NVSWITCH_GET_INFO
4613  *
4614  * Query for miscellaneous information analogous to NV2080_CTRL_GPU_INFO
4615  * This provides a single API to query for multiple pieces of miscellaneous
4616  * information via a single call.
4617  *
4618  */
4619 
4620 NvlStatus
4621 nvswitch_ctrl_get_info_lr10
4622 (
4623     nvswitch_device *device,
4624     NVSWITCH_GET_INFO *p
4625 )
4626 {
4627     NvlStatus retval = NVL_SUCCESS;
4628     NvU32 i;
4629 
4630     if (p->count > NVSWITCH_GET_INFO_COUNT_MAX)
4631     {
4632         NVSWITCH_PRINT(device, ERROR,
4633             "%s: Invalid args\n",
4634             __FUNCTION__);
4635         return -NVL_BAD_ARGS;
4636     }
4637 
4638     nvswitch_os_memset(p->info, 0, sizeof(NvU32)*NVSWITCH_GET_INFO_COUNT_MAX);
4639 
4640     for (i = 0; i < p->count; i++)
4641     {
4642         switch (p->index[i])
4643         {
4644             case NVSWITCH_GET_INFO_INDEX_ARCH:
4645                 p->info[i] = device->chip_arch;
4646                 break;
4647             case NVSWITCH_GET_INFO_INDEX_PLATFORM:
4648                 if (IS_RTLSIM(device))
4649                 {
4650                     p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_RTLSIM;
4651                 }
4652                 else if (IS_FMODEL(device))
4653                 {
4654                     p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_FMODEL;
4655                 }
4656                 else if (IS_EMULATION(device))
4657                 {
4658                     p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_EMULATION;
4659                 }
4660                 else
4661                 {
4662                     p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_SILICON;
4663                 }
4664                 break;
4665             case NVSWITCH_GET_INFO_INDEX_IMPL:
4666                 p->info[i] = device->chip_impl;
4667                 break;
4668             case NVSWITCH_GET_INFO_INDEX_CHIPID:
4669                 p->info[i] = _nvswitch_get_info_chip_id(device);
4670                 break;
4671             case NVSWITCH_GET_INFO_INDEX_REVISION_MAJOR:
4672                 p->info[i] = _nvswitch_get_info_revision_major(device);
4673                 break;
4674             case NVSWITCH_GET_INFO_INDEX_REVISION_MINOR:
4675                 p->info[i] = _nvswitch_get_info_revision_minor(device);
4676                 break;
4677             case NVSWITCH_GET_INFO_INDEX_REVISION_MINOR_EXT:
4678                 p->info[i] = _nvswitch_get_info_revision_minor_ext(device);
4679                 break;
4680             case NVSWITCH_GET_INFO_INDEX_DEVICE_ID:
4681                 p->info[i] = device->nvlink_device->pciInfo.pciDeviceId;
4682                 break;
4683             case NVSWITCH_GET_INFO_INDEX_NUM_PORTS:
4684                 p->info[i] = NVSWITCH_LINK_COUNT(device);
4685                 break;
4686             case NVSWITCH_GET_INFO_INDEX_ENABLED_PORTS_MASK_31_0:
4687                 p->info[i] = NvU64_LO32(nvswitch_get_enabled_link_mask(device));
4688                 break;
4689             case NVSWITCH_GET_INFO_INDEX_ENABLED_PORTS_MASK_63_32:
4690                 p->info[i] = NvU64_HI32(nvswitch_get_enabled_link_mask(device));
4691                 break;
4692             case NVSWITCH_GET_INFO_INDEX_NUM_VCS:
4693                 p->info[i] = _nvswitch_get_num_vcs_lr10(device);
4694                 break;
4695             case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_TABLE_SIZE:
4696                 {
4697                     NvU32 remap_ram_sel;
4698                     NvlStatus status;
4699 
4700                     status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_PRIMARY, &remap_ram_sel);
4701                     if (status == NVL_SUCCESS)
4702                     {
4703                         p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
4704                     }
4705                     else
4706                     {
4707                         p->info[i] = 0;
4708                     }
4709                 }
4710                 break;
4711             case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_EXTA_TABLE_SIZE:
4712                 {
4713                     NvU32 remap_ram_sel;
4714                     NvlStatus status;
4715 
4716                     status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_EXTA, &remap_ram_sel);
4717                     if (status == NVL_SUCCESS)
4718                     {
4719                         p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
4720                     }
4721                     else
4722                     {
4723                         p->info[i] = 0;
4724                     }
4725                 }
4726                 break;
4727             case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_EXTB_TABLE_SIZE:
4728                 {
4729                     NvU32 remap_ram_sel;
4730                     NvlStatus status;
4731 
4732                     status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_EXTB, &remap_ram_sel);
4733                     if (status == NVL_SUCCESS)
4734                     {
4735                         p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
4736                     }
4737                     else
4738                     {
4739                         p->info[i] = 0;
4740                     }
4741                 }
4742                 break;
4743             case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_MULTICAST_TABLE_SIZE:
4744                 {
4745                     NvU32 remap_ram_sel;
4746                     NvlStatus status;
4747 
4748                     status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_MULTICAST, &remap_ram_sel);
4749                     if (status == NVL_SUCCESS)
4750                     {
4751                         p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
4752                     }
4753                     else
4754                     {
4755                         p->info[i] = 0;
4756                     }
4757                 }
4758                 break;
4759             case NVSWITCH_GET_INFO_INDEX_ROUTING_ID_TABLE_SIZE:
4760                 p->info[i] = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
4761                 break;
4762             case NVSWITCH_GET_INFO_INDEX_ROUTING_LAN_TABLE_SIZE:
4763                 p->info[i] = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
4764                 break;
4765             case NVSWITCH_GET_INFO_INDEX_FREQ_KHZ:
4766                 p->info[i] = device->switch_pll.freq_khz;
4767                 break;
4768             case NVSWITCH_GET_INFO_INDEX_VCOFREQ_KHZ:
4769                 p->info[i] = device->switch_pll.vco_freq_khz;
4770                 break;
4771             case NVSWITCH_GET_INFO_INDEX_VOLTAGE_MVOLT:
4772                 p->info[i] = _nvswitch_get_info_voltage(device);
4773                 break;
4774             case NVSWITCH_GET_INFO_INDEX_PHYSICAL_ID:
4775                 p->info[i] = nvswitch_read_physical_id(device);
4776                 break;
4777             case NVSWITCH_GET_INFO_INDEX_PCI_DOMAIN:
4778                 p->info[i] = device->nvlink_device->pciInfo.domain;
4779                 break;
4780             case NVSWITCH_GET_INFO_INDEX_PCI_BUS:
4781                 p->info[i] = device->nvlink_device->pciInfo.bus;
4782                 break;
4783             case NVSWITCH_GET_INFO_INDEX_PCI_DEVICE:
4784                 p->info[i] = device->nvlink_device->pciInfo.device;
4785                 break;
4786             case NVSWITCH_GET_INFO_INDEX_PCI_FUNCTION:
4787                 p->info[i] = device->nvlink_device->pciInfo.function;
4788                 break;
4789             default:
4790                 NVSWITCH_PRINT(device, ERROR,
4791                     "%s: Undefined NVSWITCH_GET_INFO_INDEX 0x%x\n",
4792                     __FUNCTION__,
4793                     p->index[i]);
4794                 retval = -NVL_BAD_ARGS;
4795                 break;
4796         }
4797     }
4798 
4799     return retval;
4800 }
4801 
4802 NvlStatus
4803 nvswitch_set_nport_port_config_lr10
4804 (
4805     nvswitch_device *device,
4806     NVSWITCH_SET_SWITCH_PORT_CONFIG *p
4807 )
4808 {
4809     NvU32   val;
4810 
4811     if (p->requesterLinkID > DRF_MASK(NV_NPORT_REQLINKID_REQROUTINGID))
4812     {
4813         NVSWITCH_PRINT(device, ERROR,
4814             "%s: Invalid requester RID 0x%x\n",
4815             __FUNCTION__, p->requesterLinkID);
4816         return -NVL_BAD_ARGS;
4817     }
4818 
4819     if (p->requesterLanID > DRF_MASK(NV_NPORT_REQLINKID_REQROUTINGLAN))
4820     {
4821         NVSWITCH_PRINT(device, ERROR,
4822             "%s: Invalid requester RLAN 0x%x\n",
4823             __FUNCTION__, p->requesterLanID);
4824         return -NVL_BAD_ARGS;
4825     }
4826 
4827     val = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _NPORT, _CTRL);
4828     switch (p->type)
4829     {
4830         case CONNECT_ACCESS_GPU:
4831         case CONNECT_ACCESS_CPU:
4832         case CONNECT_ACCESS_SWITCH:
4833             val = FLD_SET_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _ACCESSLINK, val);
4834             break;
4835         case CONNECT_TRUNK_SWITCH:
4836             val = FLD_SET_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _TRUNKLINK, val);
4837             break;
4838         default:
4839             NVSWITCH_PRINT(device, ERROR,
4840                 "%s: invalid type #%d\n",
4841                 __FUNCTION__, p->type);
4842             return -NVL_BAD_ARGS;
4843     }
4844 
4845     switch(p->count)
4846     {
4847         case CONNECT_COUNT_512:
4848             val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _512, val);
4849             break;
4850         case CONNECT_COUNT_1024:
4851             val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _1024, val);
4852             break;
4853         case CONNECT_COUNT_2048:
4854             val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _2048, val);
4855             break;
4856         default:
4857             NVSWITCH_PRINT(device, ERROR,
4858                 "%s: invalid count #%d\n",
4859                 __FUNCTION__, p->count);
4860             return -NVL_BAD_ARGS;
4861     }
4862     NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _CTRL, val);
4863 
4864     NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _REQLINKID,
4865         DRF_NUM(_NPORT, _REQLINKID, _REQROUTINGID, p->requesterLinkID) |
4866         DRF_NUM(_NPORT, _REQLINKID, _REQROUTINGLAN, p->requesterLanID));
4867 
4868     return NVL_SUCCESS;
4869 }
4870 
/*
 * @brief Handle CTRL_NVSWITCH_SET_SWITCH_PORT_CONFIG for LR10.
 *
 * Validates the request, programs chip-specific NPORT settings, records
 * the AC/DC coupling mode on the link, and programs the AC-mode system
 * register when applicable.
 *
 * @param[in] device    nvswitch device
 * @param[in] p         port configuration request
 *
 * @returns NVL_SUCCESS on success,
 *          -NVL_BAD_ARGS on invalid port/VC1/coupling arguments,
 *          -NVL_ERR_INVALID_STATE if the link object cannot be found.
 */
NvlStatus
nvswitch_ctrl_set_switch_port_config_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_SWITCH_PORT_CONFIG *p
)
{
    nvlink_link *link;
    NvU32 val;
    NvlStatus status;

    // The port must have an NPORT engine behind it to be configurable.
    if (!NVSWITCH_IS_LINK_ENG_VALID(device, p->portNum, NPORT))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: invalid link #%d\n",
            __FUNCTION__, p->portNum);
        return -NVL_BAD_ARGS;
    }

    // VC1 may only be enabled on trunk (switch-to-switch) connections.
    if (p->enableVC1 && (p->type != CONNECT_TRUNK_SWITCH))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: VC1 only allowed on trunk links\n",
            __FUNCTION__);
        return -NVL_BAD_ARGS;
    }

    // Validate chip-specific NPORT settings and program port config settings.
    status = nvswitch_set_nport_port_config(device, p);
    if (status != NVL_SUCCESS)
    {
        return status;
    }

    link = nvswitch_get_link(device, (NvU8)p->portNum);
    if (link == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: invalid link\n",
            __FUNCTION__);
        return -NVL_ERR_INVALID_STATE;
    }

    //
    // If ac_coupled_mask is configured during nvswitch_create_link,
    // give preference to it.
    //
    // In that case the request may not change the coupling mode that was
    // established at link creation.
    //
    if (device->regkeys.ac_coupled_mask  ||
        device->regkeys.ac_coupled_mask2 ||
        device->firmware.nvlink.link_ac_coupled_mask)
    {
        if (link->ac_coupled != p->acCoupled)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: port[%d]: Unsupported AC coupled change (%s)\n",
                __FUNCTION__, p->portNum, p->acCoupled ? "AC" : "DC");
            return -NVL_BAD_ARGS;
        }
    }

    link->ac_coupled = p->acCoupled;

    // AC vs DC mode SYSTEM register
    if (link->ac_coupled)
    {
        //
        // In NVL3.0, ACMODE is handled by MINION in the INITPHASE1 command
        // Here we just setup the register with the proper info
        //
        val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK,
                _NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL);
        val = FLD_SET_DRF(_NVLIPT_LNK,
                _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _AC_DC_MODE, _AC, val);
        NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK,
                _NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, val);
    }

    // If _BUFFER_RDY is asserted, credits are locked.
    // The configuration above has already been applied; just log and return.
    val = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _NPORT, _CTRL_BUFFER_READY);
    if (FLD_TEST_DRF(_NPORT, _CTRL_BUFFER_READY, _BUFFERRDY, _ENABLE, val))
    {
        NVSWITCH_PRINT(device, SETUP,
            "%s: port[%d]: BUFFERRDY already enabled.\n",
            __FUNCTION__, p->portNum);
        return NVL_SUCCESS;
    }

    return NVL_SUCCESS;
}
4960 
/*
 * @brief Set the ingress request table.  Not supported on LR10; the
 *        ingress routing controls on this chip are exposed through the
 *        REMAP/RID/RLAN table APIs instead.
 */
NvlStatus
nvswitch_ctrl_set_ingress_request_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_INGRESS_REQUEST_TABLE *p
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
4970 
/*
 * @brief Get the ingress request table.  Not supported on LR10 (see
 *        the REMAP/RID/RLAN table APIs for this chip's routing tables).
 */
NvlStatus
nvswitch_ctrl_get_ingress_request_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_INGRESS_REQUEST_TABLE_PARAMS *params
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
4980 
/*
 * @brief Set ingress request table entry valid bits.  Not supported
 *        on LR10.
 */
NvlStatus
nvswitch_ctrl_set_ingress_request_valid_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_INGRESS_REQUEST_VALID *p
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
4990 
/*
 * @brief Get the ingress response table.  Not supported on LR10.
 */
NvlStatus
nvswitch_ctrl_get_ingress_response_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_INGRESS_RESPONSE_TABLE_PARAMS *params
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
5000 
5001 
/*
 * @brief Set the ingress response table.  Not supported on LR10.
 */
NvlStatus
nvswitch_ctrl_set_ingress_response_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_INGRESS_RESPONSE_TABLE *p
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
5011 
/*
 * @brief Set the ganged link table.  Not supported on LR10.
 */
static NvlStatus
nvswitch_ctrl_set_ganged_link_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_GANGED_LINK_TABLE *p
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
5021 
5022 static NvlStatus
5023 nvswitch_ctrl_get_internal_latency_lr10
5024 (
5025     nvswitch_device *device,
5026     NVSWITCH_GET_INTERNAL_LATENCY *pLatency
5027 )
5028 {
5029     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
5030     NvU32 vc_selector = pLatency->vc_selector;
5031     NvU32 idx_nport;
5032 
5033     // Validate VC selector
5034     if (vc_selector >= NVSWITCH_NUM_VCS_LR10)
5035     {
5036         return -NVL_BAD_ARGS;
5037     }
5038 
5039     nvswitch_os_memset(pLatency, 0, sizeof(*pLatency));
5040     pLatency->vc_selector = vc_selector;
5041 
5042     for (idx_nport=0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++)
5043     {
5044         if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, idx_nport))
5045         {
5046             continue;
5047         }
5048 
5049         pLatency->egressHistogram[idx_nport].low =
5050             chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].low;
5051         pLatency->egressHistogram[idx_nport].medium =
5052             chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].medium;
5053         pLatency->egressHistogram[idx_nport].high =
5054            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].high;
5055         pLatency->egressHistogram[idx_nport].panic =
5056            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].panic;
5057         pLatency->egressHistogram[idx_nport].count =
5058            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].count;
5059     }
5060 
5061     pLatency->elapsed_time_msec =
5062       (chip_device->latency_stats->latency[vc_selector].last_read_time_nsec -
5063        chip_device->latency_stats->latency[vc_selector].start_time_nsec)/1000000ULL;
5064 
5065     chip_device->latency_stats->latency[vc_selector].start_time_nsec =
5066         chip_device->latency_stats->latency[vc_selector].last_read_time_nsec;
5067 
5068     chip_device->latency_stats->latency[vc_selector].count = 0;
5069 
5070     // Clear accum_latency[]
5071     for (idx_nport = 0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++)
5072     {
5073         chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].low = 0;
5074         chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].medium = 0;
5075         chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].high = 0;
5076         chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].panic = 0;
5077         chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].count = 0;
5078     }
5079 
5080     return NVL_SUCCESS;
5081 }
5082 
5083 NvlStatus
5084 nvswitch_ctrl_set_latency_bins_lr10
5085 (
5086     nvswitch_device *device,
5087     NVSWITCH_SET_LATENCY_BINS *pLatency
5088 )
5089 {
5090     NvU32 vc_selector;
5091     const NvU32 freq_mhz = 1330;
5092     const NvU32 switchpll_hz = freq_mhz * 1000000ULL; // TODO: Update this with device->switch_pll.freq_khz after LR10 PLL update
5093     const NvU32 min_threshold = 10;   // Must be > zero to avoid div by zero
5094     const NvU32 max_threshold = 10000;
5095 
5096     // Quick input validation and ns to register value conversion
5097     for (vc_selector = 0; vc_selector < NVSWITCH_NUM_VCS_LR10; vc_selector++)
5098     {
5099         if ((pLatency->bin[vc_selector].lowThreshold > max_threshold)                           ||
5100             (pLatency->bin[vc_selector].lowThreshold < min_threshold)                           ||
5101             (pLatency->bin[vc_selector].medThreshold > max_threshold)                           ||
5102             (pLatency->bin[vc_selector].medThreshold < min_threshold)                           ||
5103             (pLatency->bin[vc_selector].hiThreshold  > max_threshold)                           ||
5104             (pLatency->bin[vc_selector].hiThreshold  < min_threshold)                           ||
5105             (pLatency->bin[vc_selector].lowThreshold > pLatency->bin[vc_selector].medThreshold) ||
5106             (pLatency->bin[vc_selector].medThreshold > pLatency->bin[vc_selector].hiThreshold))
5107         {
5108             return -NVL_BAD_ARGS;
5109         }
5110 
5111         pLatency->bin[vc_selector].lowThreshold =
5112             switchpll_hz / (1000000000 / pLatency->bin[vc_selector].lowThreshold);
5113         pLatency->bin[vc_selector].medThreshold =
5114             switchpll_hz / (1000000000 / pLatency->bin[vc_selector].medThreshold);
5115         pLatency->bin[vc_selector].hiThreshold =
5116             switchpll_hz / (1000000000 / pLatency->bin[vc_selector].hiThreshold);
5117 
5118         NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _LOW,    vc_selector, pLatency->bin[vc_selector].lowThreshold);
5119         NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _MEDIUM, vc_selector, pLatency->bin[vc_selector].medThreshold);
5120         NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _HIGH,   vc_selector, pLatency->bin[vc_selector].hiThreshold);
5121     }
5122 
5123     return NVL_SUCCESS;
5124 }
5125 
5126 #define NV_NPORT_REQLINKID_REQROUTINGLAN_1024  18:18
5127 #define NV_NPORT_REQLINKID_REQROUTINGLAN_2048  18:17
5128 
5129 /*
5130  * @brief Returns the ingress requester link id.
5131  *
5132  * On LR10, REQROUTINGID only gives the endpoint but not the specific port of the response packet.
5133  * To identify the specific port, the routing_ID must be appended with the upper bits of REQROUTINGLAN.
5134  *
5135  * When NV_NPORT_CTRL_ENDPOINT_COUNT = 1024, the upper bit of NV_NPORT_REQLINKID_REQROUTINGLAN become REQROUTINGID[9].
5136  * When NV_NPORT_CTRL_ENDPOINT_COUNT = 2048, the upper two bits of NV_NPORT_REQLINKID_REQROUTINGLAN become REQROUTINGID[10:9].
5137  *
5138  * @param[in] device            nvswitch device
5139  * @param[in] params            NVSWITCH_GET_INGRESS_REQLINKID_PARAMS
5140  *
5141  * @returns                     NVL_SUCCESS if action succeeded,
5142  *                              -NVL_ERR_INVALID_STATE invalid link
5143  */
NvlStatus
nvswitch_ctrl_get_ingress_reqlinkid_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_INGRESS_REQLINKID_PARAMS *params
)
{
    NvU32 regval;
    NvU32 reqRid;
    NvU32 reqRlan;
    // First bit position above the REQROUTINGID field; the upper
    // REQROUTINGLAN bits are spliced in there (see comment above).
    NvU32 rlan_shift = DRF_SHIFT_RT(NV_NPORT_REQLINKID_REQROUTINGID) + 1;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
    {
        return -NVL_BAD_ARGS;
    }

    regval = NVSWITCH_NPORT_RD32_LR10(device, params->portNum, _NPORT, _REQLINKID);
    reqRid = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGID, regval);
    // Keep the raw register value; the RLAN bits extracted from it depend
    // on the endpoint count read below.
    reqRlan = regval;

    regval = NVSWITCH_NPORT_RD32_LR10(device, params->portNum, _NPORT, _CTRL);
    if (FLD_TEST_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _1024, regval))
    {
        // 1024 endpoints: one RLAN bit extends the routing ID.
        reqRlan = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGLAN_1024, reqRlan);
        params->requesterLinkID = (reqRid | (reqRlan << rlan_shift));
    }
    else if (FLD_TEST_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _2048, regval))
    {
        // 2048 endpoints: two RLAN bits extend the routing ID.
        reqRlan = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGLAN_2048, reqRlan);
        params->requesterLinkID = (reqRid | (reqRlan << rlan_shift));
    }
    else
    {
        // 512 endpoints (or fewer): REQROUTINGID alone identifies the port.
        params->requesterLinkID = reqRid;
    }

    return NVL_SUCCESS;
}
5183 
5184 /*
5185  * REGISTER_READ/_WRITE
5186  * Provides direct access to the MMIO space for trusted clients like MODS.
5187  * This API should not be exposed to unsecure clients.
5188  */
5189 
5190 /*
5191  * _nvswitch_get_engine_base
5192  * Used by REGISTER_READ/WRITE API.  Looks up an engine based on device/instance
5193  * and returns the base address in BAR0.
5194  *
5195  * register_rw_engine   [in] REGISTER_RW_ENGINE_*
5196  * instance             [in] physical instance of device
5197  * bcast                [in] FALSE: find unicast base address
5198  *                           TRUE:  find broadcast base address
5199  * base_addr            [out] base address in BAR0 of requested device
5200  *
5201  * Returns              NVL_SUCCESS: Device base address successfully found
5202  *                      else device lookup failed
5203  */
5204 
5205 static NvlStatus
5206 _nvswitch_get_engine_base_lr10
5207 (
5208     nvswitch_device *device,
5209     NvU32   register_rw_engine,     // REGISTER_RW_ENGINE_*
5210     NvU32   instance,               // device instance
5211     NvBool  bcast,
5212     NvU32   *base_addr
5213 )
5214 {
5215     NvU32 base = 0;
5216     ENGINE_DESCRIPTOR_TYPE_LR10  *engine = NULL;
5217     NvlStatus retval = NVL_SUCCESS;
5218     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
5219 
5220     // Find the engine descriptor matching the request
5221     engine = NULL;
5222 
5223     switch (register_rw_engine)
5224     {
5225         case REGISTER_RW_ENGINE_RAW:
5226             // Special case raw IO
5227             if ((instance != 0) ||
5228                 (bcast != NV_FALSE))
5229             {
5230                 retval = -NVL_BAD_ARGS;
5231             }
5232         break;
5233 
5234         case REGISTER_RW_ENGINE_CLKS:
5235         case REGISTER_RW_ENGINE_FUSE:
5236         case REGISTER_RW_ENGINE_JTAG:
5237         case REGISTER_RW_ENGINE_PMGR:
5238         case REGISTER_RW_ENGINE_XP3G:
5239             //
5240             // Legacy devices are always single-instance, unicast-only.
5241             // These manuals are BAR0 offset-based, not IP-based.  Treat them
5242             // the same as RAW.
5243             //
5244             if ((instance != 0) ||
5245                 (bcast != NV_FALSE))
5246             {
5247                 retval = -NVL_BAD_ARGS;
5248             }
5249             register_rw_engine = REGISTER_RW_ENGINE_RAW;
5250         break;
5251 
5252         case REGISTER_RW_ENGINE_SAW:
5253             if (bcast)
5254             {
5255                 retval = -NVL_BAD_ARGS;
5256             }
5257             else
5258             {
5259                 if (NVSWITCH_ENG_VALID_LR10(device, SAW, instance))
5260                 {
5261                     engine = &chip_device->engSAW[instance];
5262                 }
5263             }
5264         break;
5265 
5266         case REGISTER_RW_ENGINE_XVE:
5267             if (bcast)
5268             {
5269                 retval = -NVL_BAD_ARGS;
5270             }
5271             else
5272             {
5273                 if (NVSWITCH_ENG_VALID_LR10(device, XVE, instance))
5274                 {
5275                     engine = &chip_device->engXVE[instance];
5276                 }
5277             }
5278         break;
5279 
5280         case REGISTER_RW_ENGINE_SOE:
5281             if (bcast)
5282             {
5283                 retval = -NVL_BAD_ARGS;
5284             }
5285             else
5286             {
5287                 if (NVSWITCH_ENG_VALID_LR10(device, SOE, instance))
5288                 {
5289                     engine = &chip_device->engSOE[instance];
5290                 }
5291             }
5292         break;
5293 
5294         case REGISTER_RW_ENGINE_SE:
5295             if (bcast)
5296             {
5297                 retval = -NVL_BAD_ARGS;
5298             }
5299             else
5300             {
5301                 if (NVSWITCH_ENG_VALID_LR10(device, SE, instance))
5302                 {
5303                     engine = &chip_device->engSE[instance];
5304                 }
5305             }
5306         break;
5307 
5308         case REGISTER_RW_ENGINE_NVLW:
5309             if (bcast)
5310             {
5311                 if (NVSWITCH_ENG_VALID_LR10(device, NVLW_BCAST, instance))
5312                 {
5313                     engine = &chip_device->engNVLW_BCAST[instance];
5314                 }
5315             }
5316             else
5317             {
5318                 if (NVSWITCH_ENG_VALID_LR10(device, NVLW, instance))
5319                 {
5320                     engine = &chip_device->engNVLW[instance];
5321                 }
5322             }
5323         break;
5324 
5325         case REGISTER_RW_ENGINE_MINION:
5326             if (bcast)
5327             {
5328                 if (NVSWITCH_ENG_VALID_LR10(device, MINION_BCAST, instance))
5329                 {
5330                     engine = &chip_device->engMINION_BCAST[instance];
5331                 }
5332             }
5333             else
5334             {
5335                 if (NVSWITCH_ENG_VALID_LR10(device, MINION, instance))
5336                 {
5337                     engine = &chip_device->engMINION[instance];
5338                 }
5339             }
5340         break;
5341 
5342         case REGISTER_RW_ENGINE_NVLIPT:
5343             if (bcast)
5344             {
5345                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_BCAST, instance))
5346                 {
5347                     engine = &chip_device->engNVLIPT_BCAST[instance];
5348                 }
5349             }
5350             else
5351             {
5352                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT, instance))
5353                 {
5354                     engine = &chip_device->engNVLIPT[instance];
5355                 }
5356             }
5357         break;
5358 
5359         case REGISTER_RW_ENGINE_NVLTLC:
5360             if (bcast)
5361             {
5362                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC_BCAST, instance))
5363                 {
5364                     engine = &chip_device->engNVLTLC_BCAST[instance];
5365                 }
5366             }
5367             else
5368             {
5369                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC, instance))
5370                 {
5371                     engine = &chip_device->engNVLTLC[instance];
5372                 }
5373             }
5374         break;
5375 
5376         case REGISTER_RW_ENGINE_NVLTLC_MULTICAST:
5377             if (bcast)
5378             {
5379                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC_MULTICAST_BCAST, instance))
5380                 {
5381                     engine = &chip_device->engNVLTLC_MULTICAST_BCAST[instance];
5382                 }
5383             }
5384             else
5385             {
5386                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC_MULTICAST, instance))
5387                 {
5388                     engine = &chip_device->engNVLTLC_MULTICAST[instance];
5389                 }
5390             }
5391         break;
5392 
5393         case REGISTER_RW_ENGINE_NPG:
5394             if (bcast)
5395             {
5396                 if (NVSWITCH_ENG_VALID_LR10(device, NPG_BCAST, instance))
5397                 {
5398                     engine = &chip_device->engNPG_BCAST[instance];
5399                 }
5400             }
5401             else
5402             {
5403                 if (NVSWITCH_ENG_VALID_LR10(device, NPG, instance))
5404                 {
5405                     engine = &chip_device->engNPG[instance];
5406                 }
5407             }
5408         break;
5409 
5410         case REGISTER_RW_ENGINE_NPORT:
5411             if (bcast)
5412             {
5413                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT_BCAST, instance))
5414                 {
5415                     engine = &chip_device->engNPORT_BCAST[instance];
5416                 }
5417             }
5418             else
5419             {
5420                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT, instance))
5421                 {
5422                     engine = &chip_device->engNPORT[instance];
5423                 }
5424             }
5425         break;
5426 
5427         case REGISTER_RW_ENGINE_NPORT_MULTICAST:
5428             if (bcast)
5429             {
5430                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT_MULTICAST_BCAST, instance))
5431                 {
5432                     engine = &chip_device->engNPORT_MULTICAST_BCAST[instance];
5433                 }
5434             }
5435             else
5436             {
5437                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT_MULTICAST, instance))
5438                 {
5439                     engine = &chip_device->engNPORT_MULTICAST[instance];
5440                 }
5441             }
5442         break;
5443 
5444         case REGISTER_RW_ENGINE_NVLIPT_LNK:
5445             if (bcast)
5446             {
5447                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK_BCAST, instance))
5448                 {
5449                     engine = &chip_device->engNVLIPT_LNK_BCAST[instance];
5450                 }
5451             }
5452             else
5453             {
5454                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK, instance))
5455                 {
5456                     engine = &chip_device->engNVLIPT_LNK[instance];
5457                 }
5458             }
5459         break;
5460 
5461         case REGISTER_RW_ENGINE_NVLIPT_LNK_MULTICAST:
5462             if (bcast)
5463             {
5464                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK_MULTICAST_BCAST, instance))
5465                 {
5466                     engine = &chip_device->engNVLIPT_LNK_MULTICAST_BCAST[instance];
5467                 }
5468             }
5469             else
5470             {
5471                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK_MULTICAST, instance))
5472                 {
5473                     engine = &chip_device->engNVLIPT_LNK_MULTICAST[instance];
5474                 }
5475             }
5476         break;
5477 
5478         case REGISTER_RW_ENGINE_PLL:
5479             if (bcast)
5480             {
5481                 if (NVSWITCH_ENG_VALID_LR10(device, PLL_BCAST, instance))
5482                 {
5483                     engine = &chip_device->engPLL_BCAST[instance];
5484                 }
5485             }
5486             else
5487             {
5488                 if (NVSWITCH_ENG_VALID_LR10(device, PLL, instance))
5489                 {
5490                     engine = &chip_device->engPLL[instance];
5491                 }
5492             }
5493         break;
5494 
5495         case REGISTER_RW_ENGINE_NVLDL:
5496             if (bcast)
5497             {
5498                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL_BCAST, instance))
5499                 {
5500                     engine = &chip_device->engNVLDL_BCAST[instance];
5501                 }
5502             }
5503             else
5504             {
5505                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL, instance))
5506                 {
5507                     engine = &chip_device->engNVLDL[instance];
5508                 }
5509             }
5510         break;
5511 
5512         case REGISTER_RW_ENGINE_NVLDL_MULTICAST:
5513             if (bcast)
5514             {
5515                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL_MULTICAST_BCAST, instance))
5516                 {
5517                     engine = &chip_device->engNVLDL_MULTICAST_BCAST[instance];
5518                 }
5519             }
5520             else
5521             {
5522                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL_MULTICAST, instance))
5523                 {
5524                     engine = &chip_device->engNVLDL_MULTICAST[instance];
5525                 }
5526             }
5527         break;
5528 
5529         case REGISTER_RW_ENGINE_NXBAR:
5530             if (bcast)
5531             {
5532                 if (NVSWITCH_ENG_VALID_LR10(device, NXBAR_BCAST, instance))
5533                 {
5534                     engine = &chip_device->engNXBAR_BCAST[instance];
5535                 }
5536             }
5537             else
5538             {
5539                 if (NVSWITCH_ENG_VALID_LR10(device, NXBAR, instance))
5540                 {
5541                     engine = &chip_device->engNXBAR[instance];
5542                 }
5543             }
5544         break;
5545 
5546         case REGISTER_RW_ENGINE_TILE:
5547             if (bcast)
5548             {
5549                 if (NVSWITCH_ENG_VALID_LR10(device, TILE_BCAST, instance))
5550                 {
5551                     engine = &chip_device->engTILE_BCAST[instance];
5552                 }
5553             }
5554             else
5555             {
5556                 if (NVSWITCH_ENG_VALID_LR10(device, TILE, instance))
5557                 {
5558                     engine = &chip_device->engTILE[instance];
5559                 }
5560             }
5561         break;
5562 
5563         case REGISTER_RW_ENGINE_TILE_MULTICAST:
5564             if (bcast)
5565             {
5566                 if (NVSWITCH_ENG_VALID_LR10(device, TILE_MULTICAST_BCAST, instance))
5567                 {
5568                     engine = &chip_device->engTILE_MULTICAST_BCAST[instance];
5569                 }
5570             }
5571             else
5572             {
5573                 if (NVSWITCH_ENG_VALID_LR10(device, TILE_MULTICAST, instance))
5574                 {
5575                     engine = &chip_device->engTILE_MULTICAST[instance];
5576                 }
5577             }
5578         break;
5579 
5580         default:
5581             NVSWITCH_PRINT(device, ERROR,
5582                 "%s: unknown REGISTER_RW_ENGINE 0x%x\n",
5583                 __FUNCTION__,
5584                 register_rw_engine);
5585             engine = NULL;
5586         break;
5587     }
5588 
5589     if (register_rw_engine == REGISTER_RW_ENGINE_RAW)
5590     {
5591         // Raw IO -- client provides full BAR0 offset
5592         base = 0;
5593     }
5594     else
5595     {
5596         // Check engine descriptor was found and valid
5597         if (engine == NULL)
5598         {
5599             retval = -NVL_BAD_ARGS;
5600             NVSWITCH_PRINT(device, ERROR,
5601                 "%s: invalid REGISTER_RW_ENGINE/instance 0x%x(%d)\n",
5602                 __FUNCTION__,
5603                 register_rw_engine,
5604                 instance);
5605         }
5606         else if (!engine->valid)
5607         {
5608             retval = -NVL_UNBOUND_DEVICE;
5609             NVSWITCH_PRINT(device, ERROR,
5610                 "%s: REGISTER_RW_ENGINE/instance 0x%x(%d) disabled or invalid\n",
5611                 __FUNCTION__,
5612                 register_rw_engine,
5613                 instance);
5614         }
5615         else
5616         {
5617             if (bcast && (engine->disc_type == DISCOVERY_TYPE_BROADCAST))
5618             {
5619                 //
5620                 // Caveat emptor: A read of a broadcast register is
5621                 // implementation-specific.
5622                 //
5623                 base = engine->info.bc.bc_addr;
5624             }
5625             else if ((!bcast) && (engine->disc_type == DISCOVERY_TYPE_UNICAST))
5626             {
5627                 base = engine->info.uc.uc_addr;
5628             }
5629 
5630             if (base == 0)
5631             {
5632                 NVSWITCH_PRINT(device, ERROR,
5633                     "%s: REGISTER_RW_ENGINE/instance 0x%x(%d) has %s base address 0!\n",
5634                     __FUNCTION__,
5635                     register_rw_engine,
5636                     instance,
5637                     (bcast ? "BCAST" : "UNICAST" ));
5638                 retval = -NVL_IO_ERROR;
5639             }
5640         }
5641     }
5642 
5643     *base_addr = base;
5644     return retval;
5645 }
5646 
5647 /*
5648  * CTRL_NVSWITCH_REGISTER_READ
5649  *
5650  * This provides direct access to the MMIO space for trusted clients like
5651  * MODS.
5652  * This API should not be exposed to unsecure clients.
5653  */
5654 
5655 static NvlStatus
5656 nvswitch_ctrl_register_read_lr10
5657 (
5658     nvswitch_device *device,
5659     NVSWITCH_REGISTER_READ *p
5660 )
5661 {
5662     NvU32 base;
5663     NvU32 data;
5664     NvlStatus retval = NVL_SUCCESS;
5665 
5666     retval = _nvswitch_get_engine_base_lr10(device, p->engine, p->instance, NV_FALSE, &base);
5667     if (retval != NVL_SUCCESS)
5668     {
5669         return retval;
5670     }
5671 
5672     // Make sure target offset isn't out-of-range
5673     if ((base + p->offset) >= device->nvlink_device->pciInfo.bars[0].barSize)
5674     {
5675         return -NVL_IO_ERROR;
5676     }
5677 
5678     //
5679     // Some legacy device manuals are not 0-based (IP style).
5680     //
5681     data = NVSWITCH_OFF_RD32(device, base + p->offset);
5682     p->val = data;
5683 
5684     return NVL_SUCCESS;
5685 }
5686 
5687 /*
5688  * CTRL_NVSWITCH_REGISTER_WRITE
5689  *
5690  * This provides direct access to the MMIO space for trusted clients like
5691  * MODS.
5692  * This API should not be exposed to unsecure clients.
5693  */
5694 
5695 static NvlStatus
5696 nvswitch_ctrl_register_write_lr10
5697 (
5698     nvswitch_device *device,
5699     NVSWITCH_REGISTER_WRITE *p
5700 )
5701 {
5702     NvU32 base;
5703     NvlStatus retval = NVL_SUCCESS;
5704 
5705     retval = _nvswitch_get_engine_base_lr10(device, p->engine, p->instance, p->bcast, &base);
5706     if (retval != NVL_SUCCESS)
5707     {
5708         return retval;
5709     }
5710 
5711     // Make sure target offset isn't out-of-range
5712     if ((base + p->offset) >= device->nvlink_device->pciInfo.bars[0].barSize)
5713     {
5714         return -NVL_IO_ERROR;
5715     }
5716 
5717     //
5718     // Some legacy device manuals are not 0-based (IP style).
5719     //
5720     NVSWITCH_OFF_WR32(device, base + p->offset, p->val);
5721 
5722     return NVL_SUCCESS;
5723 }
5724 
5725 NvlStatus
5726 nvswitch_ctrl_get_bios_info_lr10
5727 (
5728     nvswitch_device *device,
5729     NVSWITCH_GET_BIOS_INFO_PARAMS *p
5730 )
5731 {
5732     NvU32 biosVersionBytes;
5733     NvU32 biosOemVersionBytes;
5734     NvU32 biosMagic = 0x9210;
5735 
5736     //
5737     // Example: 92.10.09.00.00 is the formatted version string
5738     //          |         |  |
5739     //          |         |  |__ BIOS OEM version byte
5740     //          |         |
5741     //          |_________|_____ BIOS version bytes
5742     //
5743     biosVersionBytes = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_6);
5744     biosOemVersionBytes = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_7);
5745 
5746     //
5747     // LR10 is built out of core92 and the BIOS version will always begin with
5748     // 92.10.xx.xx.xx
5749     //
5750     if ((biosVersionBytes >> 16) != biosMagic)
5751     {
5752         NVSWITCH_PRINT(device, ERROR,
5753                 "BIOS version not found in scratch register\n");
5754         return -NVL_ERR_INVALID_STATE;
5755     }
5756 
5757     p->version = (((NvU64)biosVersionBytes) << 8) | (biosOemVersionBytes & 0xff);
5758 
5759     return NVL_SUCCESS;
5760 }
5761 
5762 NvlStatus
5763 nvswitch_ctrl_get_inforom_version_lr10
5764 (
5765     nvswitch_device *device,
5766     NVSWITCH_GET_INFOROM_VERSION_PARAMS *p
5767 )
5768 {
5769 
5770     struct inforom *pInforom = device->pInforom;
5771 
5772     if ((pInforom == NULL) || (!pInforom->IMG.bValid))
5773     {
5774         return -NVL_ERR_NOT_SUPPORTED;
5775     }
5776 
5777     if (NV_ARRAY_ELEMENTS(pInforom->IMG.object.version) <
5778         NVSWITCH_INFOROM_VERSION_LEN)
5779     {
5780         NVSWITCH_PRINT(device, ERROR,
5781                        "Inforom IMG object struct smaller than expected\n");
5782         return -NVL_ERR_INVALID_STATE;
5783     }
5784 
5785     nvswitch_inforom_string_copy(pInforom->IMG.object.version, p->version,
5786                                  NVSWITCH_INFOROM_VERSION_LEN);
5787 
5788     return NVL_SUCCESS;
5789 }
5790 
5791 void
5792 nvswitch_corelib_clear_link_state_lr10
5793 (
5794     nvlink_link *link
5795 )
5796 {
5797     // Receiver Detect needs to happen again
5798     link->bRxDetected = NV_FALSE;
5799 
5800     // INITNEGOTIATE needs to happen again
5801     link->bInitnegotiateConfigGood = NV_FALSE;
5802 
5803     // TxCommonMode needs to happen again
5804     link->bTxCommonModeFail = NV_FALSE;
5805 
5806     // SAFE transition needs to happen again
5807     link->bSafeTransitionFail = NV_FALSE;
5808 
5809     // Reset the SW state tracking the link and sublink states
5810     link->state            = NVLINK_LINKSTATE_OFF;
5811     link->tx_sublink_state = NVLINK_SUBLINK_STATE_TX_OFF;
5812     link->rx_sublink_state = NVLINK_SUBLINK_STATE_RX_OFF;
5813 }
5814 
5815 const static NvU32 nport_reg_addr[] =
5816 {
5817     NV_NPORT_CTRL,
5818     NV_NPORT_CTRL_SLCG,
5819     NV_NPORT_REQLINKID,
5820     NV_NPORT_PORTSTAT_CONTROL,
5821     NV_NPORT_PORTSTAT_SNAP_CONTROL,
5822     NV_NPORT_PORTSTAT_WINDOW_LIMIT,
5823     NV_NPORT_PORTSTAT_LIMIT_LOW_0,
5824     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_0,
5825     NV_NPORT_PORTSTAT_LIMIT_HIGH_0,
5826     NV_NPORT_PORTSTAT_LIMIT_LOW_1,
5827     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1,
5828     NV_NPORT_PORTSTAT_LIMIT_HIGH_1,
5829     NV_NPORT_PORTSTAT_LIMIT_LOW_2,
5830     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_2,
5831     NV_NPORT_PORTSTAT_LIMIT_HIGH_2,
5832     NV_NPORT_PORTSTAT_LIMIT_LOW_3,
5833     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_3,
5834     NV_NPORT_PORTSTAT_LIMIT_HIGH_3,
5835     NV_NPORT_PORTSTAT_LIMIT_LOW_4,
5836     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_4,
5837     NV_NPORT_PORTSTAT_LIMIT_HIGH_4,
5838     NV_NPORT_PORTSTAT_LIMIT_LOW_5,
5839     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_5,
5840     NV_NPORT_PORTSTAT_LIMIT_HIGH_5,
5841     NV_NPORT_PORTSTAT_LIMIT_LOW_6,
5842     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_6,
5843     NV_NPORT_PORTSTAT_LIMIT_HIGH_6,
5844     NV_NPORT_PORTSTAT_LIMIT_LOW_7,
5845     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_7,
5846     NV_NPORT_PORTSTAT_LIMIT_HIGH_7,
5847     NV_NPORT_PORTSTAT_SOURCE_FILTER_0,
5848     NV_NPORT_PORTSTAT_SOURCE_FILTER_1,
5849     NV_ROUTE_ROUTE_CONTROL,
5850     NV_ROUTE_CMD_ROUTE_TABLE0,
5851     NV_ROUTE_CMD_ROUTE_TABLE1,
5852     NV_ROUTE_CMD_ROUTE_TABLE2,
5853     NV_ROUTE_CMD_ROUTE_TABLE3,
5854     NV_ROUTE_ERR_LOG_EN_0,
5855     NV_ROUTE_ERR_CONTAIN_EN_0,
5856     NV_ROUTE_ERR_ECC_CTRL,
5857     NV_ROUTE_ERR_GLT_ECC_ERROR_COUNTER_LIMIT,
5858     NV_ROUTE_ERR_NVS_ECC_ERROR_COUNTER_LIMIT,
5859     NV_INGRESS_ERR_LOG_EN_0,
5860     NV_INGRESS_ERR_CONTAIN_EN_0,
5861     NV_INGRESS_ERR_ECC_CTRL,
5862     NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_COUNTER_LIMIT,
5863     NV_INGRESS_ERR_RIDTAB_ECC_ERROR_COUNTER_LIMIT,
5864     NV_INGRESS_ERR_RLANTAB_ECC_ERROR_COUNTER_LIMIT,
5865     NV_INGRESS_ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT,
5866     NV_EGRESS_CTRL,
5867     NV_EGRESS_CTO_TIMER_LIMIT,
5868     NV_EGRESS_ERR_LOG_EN_0,
5869     NV_EGRESS_ERR_CONTAIN_EN_0,
5870     NV_EGRESS_ERR_ECC_CTRL,
5871     NV_EGRESS_ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT,
5872     NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT,
5873     NV_TSTATE_TAGSTATECONTROL,
5874     NV_TSTATE_ATO_TIMER_LIMIT,
5875     NV_TSTATE_CREQ_CAM_LOCK,
5876     NV_TSTATE_ERR_LOG_EN_0,
5877     NV_TSTATE_ERR_CONTAIN_EN_0,
5878     NV_TSTATE_ERR_ECC_CTRL,
5879     NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5880     NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT,
5881     NV_TSTATE_ERR_TD_TID_RAM_ECC_ERROR_COUNTER_LIMIT,
5882     NV_SOURCETRACK_CTRL,
5883     NV_SOURCETRACK_MULTISEC_TIMER0,
5884     NV_SOURCETRACK_ERR_LOG_EN_0,
5885     NV_SOURCETRACK_ERR_CONTAIN_EN_0,
5886     NV_SOURCETRACK_ERR_ECC_CTRL,
5887     NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5888     NV_SOURCETRACK_ERR_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5889     NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5890 };
5891 
5892 /*
5893  *  Disable interrupts comming from NPG & NVLW blocks.
5894  */
static void
_nvswitch_link_disable_interrupts_lr10
(
    nvswitch_device *device,
    NvU32 link
)
{
    NvU32 i;

    // Mask all NPORT common error reporting for this link:
    // correctable, fatal and non-fatal are all disabled.
    NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT,
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x0) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x0) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x0));

    // Mask every NVLW per-link interrupt tree (INTR_0/1/2) for each
    // interrupt index in the block.
    for (i = 0; i < NV_NVLCTRL_LINK_INTR_0_STATUS__SIZE_1; i++)
    {
        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_0_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _FATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _NONFATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _CORRECTABLE, 0x0));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_1_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _FATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _NONFATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _CORRECTABLE, 0x0));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_2_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _FATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _NONFATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _CORRECTABLE, 0x0));
    }
}
5927 
5928 /*
5929  *  Reset NPG & NVLW interrupt state.
5930  */
static void
_nvswitch_link_reset_interrupts_lr10
(
    nvswitch_device *device,
    NvU32 link
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32 i;

    // Re-enable NPORT common error reporting for this link:
    // correctable, fatal and non-fatal all back on.
    NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT,
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x1) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x1) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x1));

    // Unmask every NVLW per-link interrupt tree (INTR_0/1/2) that
    // _nvswitch_link_disable_interrupts_lr10 masked.
    for (i = 0; i < NV_NVLCTRL_LINK_INTR_0_STATUS__SIZE_1; i++)
    {
        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_0_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _FATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _NONFATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _CORRECTABLE, 0x1));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_1_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _FATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _NONFATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _CORRECTABLE, 0x1));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_2_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _FATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _NONFATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _CORRECTABLE, 0x1));
    }

    // Enable interrupts which are disabled to prevent interrupt storm.
    // The per-unit masks come from the values computed at init time and
    // cached in chip_device->intr_mask.
    NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.route.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.route.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.ingress.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.ingress.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.egress.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.egress.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.tstate.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.tstate.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.sourcetrack.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.sourcetrack.nonfatal);

    // Clear fatal error status
    device->link[link].fatal_error_occurred = NV_FALSE;
}
5979 
5980 /*
5981  * @Brief : Control to reset and drain the links.
5982  *
5983  * @param[in] device        A reference to the device to initialize
5984  * @param[in] linkMask      A mask of link(s) to be reset.
5985  *
5986  * @returns :               NVL_SUCCESS if there were no errors
5987  *                         -NVL_BAD_PARAMS if input parameters are wrong.
5988  *                         -NVL_ERR_INVALID_STATE if other errors are present and a full-chip reset is required.
5989  *                         -NVL_INITIALIZATION_TOTAL_FAILURE if NPORT initialization failed and a retry is required.
5990  */
5991 
5992 NvlStatus
5993 nvswitch_reset_and_drain_links_lr10
5994 (
5995     nvswitch_device *device,
5996     NvU64 link_mask
5997 )
5998 {
5999     NvlStatus status = -NVL_ERR_GENERIC;
6000     nvlink_link *link_info;
6001     NvU32 val;
6002     NvU32 link;
6003     NvU32 idx_nport;
6004     NvU32 npg;
6005     NVSWITCH_TIMEOUT timeout;
6006     NvBool           keepPolling;
6007     NvU32 i;
6008     NvU64 link_mode, tx_sublink_mode, rx_sublink_mode;
6009     NvU32 tx_sublink_submode, rx_sublink_submode;
6010     NvU32 *nport_reg_val = NULL;
6011     NvU32 reg_count = NV_ARRAY_ELEMENTS(nport_reg_addr);
6012     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
6013 
6014     if ((link_mask == 0) ||
6015         (link_mask >> NVSWITCH_LINK_COUNT(device)))
6016     {
6017         NVSWITCH_PRINT(device, ERROR,
6018             "%s: Invalid link_mask = 0x%llx\n",
6019             __FUNCTION__, link_mask);
6020 
6021         return -NVL_BAD_ARGS;
6022     }
6023 
6024     // Check for in-active links
6025     FOR_EACH_INDEX_IN_MASK(64, link, link_mask)
6026     {
6027         if (!nvswitch_is_link_valid(device, link))
6028         {
6029             NVSWITCH_PRINT(device, ERROR,
6030                 "%s: link #%d invalid\n",
6031                 __FUNCTION__, link);
6032 
6033             continue;
6034         }
6035         if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, link))
6036         {
6037             NVSWITCH_PRINT(device, ERROR,
6038                 "%s: NPORT #%d invalid\n",
6039                 __FUNCTION__, link);
6040 
6041             continue;
6042         }
6043 
6044         if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLW, link))
6045         {
6046             NVSWITCH_PRINT(device, ERROR,
6047                 "%s: NVLW #%d invalid\n",
6048                 __FUNCTION__, link);
6049 
6050             continue;
6051         }
6052     }
6053     FOR_EACH_INDEX_IN_MASK_END;
6054 
6055     // Buffer to backup NPORT state
6056     nport_reg_val = nvswitch_os_malloc(sizeof(nport_reg_addr));
6057     if (nport_reg_val == NULL)
6058     {
6059         NVSWITCH_PRINT(device, ERROR,
6060             "%s: Failed to allocate memory\n",
6061             __FUNCTION__);
6062 
6063         return -NVL_NO_MEM;
6064     }
6065 
6066     FOR_EACH_INDEX_IN_MASK(64, link, link_mask)
6067     {
6068         // Unregister links to make them unusable while reset is in progress.
6069         link_info = nvswitch_get_link(device, link);
6070         if (link_info == NULL)
6071         {
6072             NVSWITCH_PRINT(device, ERROR,
6073                 "%s: invalid link %d\n",
6074                 __FUNCTION__, link);
6075             continue;
6076         }
6077 
6078         nvlink_lib_unregister_link(link_info);
6079 
6080         //
6081         // Step 0 :
6082         // Prior to starting port reset, FM must shutdown the NVlink links
6083         // it wishes to reset.
6084         // However, with shared-virtualization, FM is unable to shut down the links
6085         // since the GPU is no longer attached to the service VM.
6086         // In this case, we must perform unilateral shutdown on the LR10 side
6087         // of the link.
6088         //
6089         // If links are in OFF or RESET, we don't need to perform shutdown
6090         // If links already went through a proper pseudo-clean shutdown sequence,
6091         // they'll be in SAFE + sublinks in OFF
6092         //
6093 
6094         status = nvswitch_corelib_get_dl_link_mode_lr10(link_info, &link_mode);
6095         if (status != NVL_SUCCESS)
6096         {
6097             NVSWITCH_PRINT(device, ERROR,
6098                 "%s: Unable to get link mode from link %d\n",
6099                 __FUNCTION__, link);
6100             goto nvswitch_reset_and_drain_links_exit;
6101         }
6102         status = nvswitch_corelib_get_tx_mode_lr10(link_info, &tx_sublink_mode, &tx_sublink_submode);
6103         if (status != NVL_SUCCESS)
6104         {
6105             NVSWITCH_PRINT(device, ERROR,
6106                 "%s: Unable to get tx sublink mode from link %d\n",
6107                 __FUNCTION__, link);
6108             goto nvswitch_reset_and_drain_links_exit;
6109         }
6110         status = nvswitch_corelib_get_rx_mode_lr10(link_info, &rx_sublink_mode, &rx_sublink_submode);
6111         if (status != NVL_SUCCESS)
6112         {
6113             NVSWITCH_PRINT(device, ERROR,
6114                 "%s: Unable to get rx sublink mode from link %d\n",
6115                 __FUNCTION__, link);
6116             goto nvswitch_reset_and_drain_links_exit;
6117         }
6118 
6119         if (!((link_mode == NVLINK_LINKSTATE_RESET) ||
6120               (link_mode == NVLINK_LINKSTATE_OFF) ||
6121               ((link_mode == NVLINK_LINKSTATE_SAFE) &&
6122                (tx_sublink_mode == NVLINK_SUBLINK_STATE_TX_OFF) &&
6123                (rx_sublink_mode == NVLINK_SUBLINK_STATE_RX_OFF))))
6124         {
6125             nvswitch_execute_unilateral_link_shutdown_lr10(link_info);
6126             nvswitch_corelib_clear_link_state_lr10(link_info);
6127         }
6128 
6129         //
6130         // Step 1 : Perform surgical reset
6131         // Refer to switch IAS 11.5.2 Link Reset.
6132         //
6133 
6134         // Step 1.a : Backup NPORT state before reset
6135         for (i = 0; i < reg_count; i++)
6136         {
6137             nport_reg_val[i] = NVSWITCH_ENG_OFF_RD32(device, NPORT, _UNICAST, link,
6138                 nport_reg_addr[i]);
6139         }
6140 
6141         // Step 1.b : Assert INGRESS_STOP / EGRESS_STOP
6142         val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _CTRL_STOP);
6143         val = FLD_SET_DRF(_NPORT, _CTRL_STOP, _INGRESS_STOP, _STOP, val);
6144         val = FLD_SET_DRF(_NPORT, _CTRL_STOP, _EGRESS_STOP, _STOP, val);
6145         NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _CTRL_STOP, val);
6146 
6147         // Wait for stop operation to take effect at TLC.
6148         // Expected a minimum of 256 clk cycles.
6149         nvswitch_os_sleep(1);
6150 
6151         //
6152         // Step 1.c : Disable NPG & NVLW interrupts
6153         //
6154         _nvswitch_link_disable_interrupts_lr10(device, link);
6155 
6156         // Step 1.d : Assert NPortWarmReset
6157         npg = link / NVSWITCH_LINKS_PER_NPG;
6158         val = NVSWITCH_NPG_RD32_LR10(device, npg, _NPG, _WARMRESET);
6159 
6160         idx_nport = link % NVSWITCH_LINKS_PER_NPG;
6161         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _WARMRESET,
6162             DRF_NUM(_NPG, _WARMRESET, _NPORTWARMRESET, ~NVBIT(idx_nport)));
6163 
6164         // Step 1.e : Initiate Minion reset sequence.
6165         status = nvswitch_request_tl_link_state_lr10(link_info,
6166             NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET, NV_TRUE);
6167         if (status != NVL_SUCCESS)
6168         {
6169             NVSWITCH_PRINT(device, ERROR,
6170                 "%s: NvLink Reset has failed for link %d\n",
6171                 __FUNCTION__, link);
6172             goto nvswitch_reset_and_drain_links_exit;
6173         }
6174 
6175         // Step 1.e : De-assert NPortWarmReset
6176         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _WARMRESET, val);
6177 
6178         // Step 1.f : Assert and De-assert NPort debug_clear
6179         // to clear the error status
6180         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _DEBUG_CLEAR,
6181             DRF_NUM(_NPG, _DEBUG_CLEAR, _CLEAR, NVBIT(idx_nport)));
6182 
6183         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _DEBUG_CLEAR,
6184             DRF_DEF(_NPG, _DEBUG_CLEAR, _CLEAR, _DEASSERT));
6185 
6186         // Step 1.g : Clear CONTAIN_AND_DRAIN to clear contain state (Bug 3115824)
6187         NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _CONTAIN_AND_DRAIN,
6188             DRF_DEF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE));
6189 
6190         val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _CONTAIN_AND_DRAIN);
6191         if (FLD_TEST_DRF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE, val))
6192         {
6193             NVSWITCH_PRINT(device, ERROR,
6194                 "%s: NPORT Contain and Drain Clear has failed for link %d\n",
6195                 __FUNCTION__, link);
6196             status = NVL_ERR_INVALID_STATE;
6197             goto nvswitch_reset_and_drain_links_exit;
6198         }
6199 
6200         //
6201         // Step 2 : Assert NPORT Reset after Control & Drain routine.
6202         //  Clear Tagpool, CrumbStore and CAM RAMs
6203         //
6204 
6205         // Step 2.a Clear Tagpool RAM
6206         NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _INITIALIZATION,
6207             DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT));
6208 
6209         nvswitch_timeout_create(25 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
6210 
6211         do
6212         {
6213             keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
6214 
6215             // Check if NPORT initialization is done
6216             val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _INITIALIZATION);
6217             if (FLD_TEST_DRF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT, val))
6218             {
6219                 break;
6220             }
6221 
6222             nvswitch_os_sleep(1);
6223         }
6224         while (keepPolling);
6225 
6226         if (!FLD_TEST_DRF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT, val))
6227         {
6228             NVSWITCH_PRINT(device, ERROR,
6229                 "%s: Timeout waiting for TAGPOOL Initialization on link %d)\n",
6230                 __FUNCTION__, link);
6231 
6232             status = -NVL_INITIALIZATION_TOTAL_FAILURE;
6233             goto nvswitch_reset_and_drain_links_exit;
6234         }
6235 
6236         // Step 2.b Clear CrumbStore RAM
6237         val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0) |
6238               DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CRUMBSTORE_RAM) |
6239               DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 1);
6240 
6241         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_ADDRESS, val);
6242         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA1, 0x0);
6243 
6244         val = DRF_NUM(_TSTATE, _RAM_DATA0, _ECC, 0x7f);
6245         for (i = 0; i <= NV_TSTATE_RAM_ADDRESS_ADDR_TAGPOOL_CRUMBSTORE_TDTID_DEPTH; i++)
6246         {
6247             NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA0, val);
6248         }
6249 
6250         // Step 2.c Clear CAM RAM
6251         val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0) |
6252               DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CREQ_CAM) |
6253               DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 1);
6254 
6255         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_ADDRESS, val);
6256         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA1, 0x0);
6257         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA2, 0x0);
6258 
6259         for (i = 0; i <= NV_TSTATE_RAM_ADDRESS_ADDR_CREQ_CAM_DEPTH; i++)
6260         {
6261             NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA0, 0x0);
6262         }
6263 
6264         //
6265         // Step 3 : Restore link state
6266         //
6267 
6268         // Restore NPORT state after reset
6269         for (i = 0; i < reg_count; i++)
6270         {
6271             NVSWITCH_ENG_OFF_WR32(device, NPORT, _UNICAST, link,
6272                                   nport_reg_addr[i], nport_reg_val[i]);
6273         }
6274 
6275         // Initialize GLT
6276         nvswitch_set_ganged_link_table_lr10(device, 0, chip_device->ganged_link_table,
6277                                             ROUTE_GANG_TABLE_SIZE/2);
6278 
6279         // Initialize select scratch registers to 0x0
6280         nvswitch_init_scratch_lr10(device);
6281 
6282         // Reset NVLW and NPORT interrupt state
6283         _nvswitch_link_reset_interrupts_lr10(device, link);
6284 
6285         // Re-register links.
6286         status = nvlink_lib_register_link(device->nvlink_device, link_info);
6287         if (status != NVL_SUCCESS)
6288         {
6289             nvswitch_destroy_link(link_info);
6290             goto nvswitch_reset_and_drain_links_exit;
6291         }
6292     }
6293     FOR_EACH_INDEX_IN_MASK_END;
6294 
6295     // Launch ALI training if applicable
6296     (void)nvswitch_launch_ALI(device);
6297 
6298 nvswitch_reset_and_drain_links_exit:
6299     nvswitch_os_free(nport_reg_val);
6300     return status;
6301 }
6302 
6303 NvlStatus
6304 nvswitch_get_nvlink_ecc_errors_lr10
6305 (
6306     nvswitch_device *device,
6307     NVSWITCH_GET_NVLINK_ECC_ERRORS_PARAMS *params
6308 )
6309 {
6310     NvU32 statData;
6311     NvU8 i, j;
6312     NvlStatus status;
6313     NvBool bLaneReversed;
6314 
6315     nvswitch_os_memset(params->errorLink, 0, sizeof(params->errorLink));
6316 
6317     FOR_EACH_INDEX_IN_MASK(64, i, params->linkMask)
6318     {
6319         nvlink_link         *link;
6320         NVSWITCH_LANE_ERROR *errorLane;
6321         NvU8                offset;
6322         NvBool              minion_enabled;
6323         NvU32               sublinkWidth;
6324 
6325         link = nvswitch_get_link(device, i);
6326         sublinkWidth = device->hal.nvswitch_get_sublink_width(device, i);
6327 
6328         if ((link == NULL) ||
6329             !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
6330             (i >= NVSWITCH_LINK_COUNT(device)))
6331         {
6332             return -NVL_BAD_ARGS;
6333         }
6334 
6335         minion_enabled = nvswitch_is_minion_initialized(device,
6336             NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION));
6337 
6338         bLaneReversed = nvswitch_link_lane_reversed_lr10(device, link->linkNumber);
6339 
6340         for (j = 0; j < NVSWITCH_NVLINK_MAX_LANES; j++)
6341         {
6342             if (minion_enabled && (j < sublinkWidth))
6343             {
6344                 status = nvswitch_minion_get_dl_status(device, i,
6345                                         (NV_NVLSTAT_RX12 + j), 0, &statData);
6346 
6347                 if (status != NVL_SUCCESS)
6348                 {
6349                     return status;
6350                 }
6351                 offset = bLaneReversed ? ((sublinkWidth - 1) - j) : j;
6352                 errorLane                = &params->errorLink[i].errorLane[offset];
6353                 errorLane->valid         = NV_TRUE;
6354             }
6355             else
6356             {
6357                 // MINION disabled
6358                 statData                 = 0;
6359                 offset                   = j;
6360                 errorLane                = &params->errorLink[i].errorLane[offset];
6361                 errorLane->valid         = NV_FALSE;
6362             }
6363 
6364             errorLane->eccErrorValue = DRF_VAL(_NVLSTAT, _RX12, _ECC_CORRECTED_ERR_L0_VALUE, statData);
6365             errorLane->overflowed    = DRF_VAL(_NVLSTAT, _RX12, _ECC_CORRECTED_ERR_L0_OVER, statData);
6366         }
6367     }
6368     FOR_EACH_INDEX_IN_MASK_END;
6369 
6370     return NVL_SUCCESS;
6371 }
6372 
6373 static NvU32
6374 nvswitch_get_num_links_lr10
6375 (
6376     nvswitch_device *device
6377 )
6378 {
6379     NvU32 num_links = NVSWITCH_NUM_LINKS_LR10;
6380     return num_links;
6381 }
6382 
6383 NvBool
6384 nvswitch_is_link_valid_lr10
6385 (
6386     nvswitch_device *device,
6387     NvU32            link_id
6388 )
6389 {
6390     if (link_id >= nvswitch_get_num_links(device))
6391     {
6392         return NV_FALSE;
6393     }
6394     return device->link[link_id].valid;
6395 }
6396 
6397 NvlStatus
6398 nvswitch_ctrl_get_fom_values_lr10
6399 (
6400     nvswitch_device *device,
6401     NVSWITCH_GET_FOM_VALUES_PARAMS *p
6402 )
6403 {
6404     NvlStatus status;
6405     NvU32     statData;
6406     nvlink_link *link;
6407 
6408     link = nvswitch_get_link(device, p->linkId);
6409     if (link == NULL)
6410     {
6411         NVSWITCH_PRINT(device, ERROR, "%s: link #%d invalid\n",
6412             __FUNCTION__, p->linkId);
6413         return -NVL_BAD_ARGS;
6414     }
6415 
6416     if (nvswitch_is_link_in_reset(device, link))
6417     {
6418         NVSWITCH_PRINT(device, ERROR, "%s: link #%d is in reset\n",
6419             __FUNCTION__, p->linkId);
6420         return -NVL_ERR_INVALID_STATE;
6421     }
6422 
6423     status = nvswitch_minion_get_dl_status(device, p->linkId,
6424                                         NV_NVLSTAT_TR16, 0, &statData);
6425     p->figureOfMeritValues[0] = (NvU16) (statData & 0xFFFF);
6426     p->figureOfMeritValues[1] = (NvU16) ((statData >> 16) & 0xFFFF);
6427 
6428     status = nvswitch_minion_get_dl_status(device, p->linkId,
6429                                         NV_NVLSTAT_TR17, 0, &statData);
6430     p->figureOfMeritValues[2] = (NvU16) (statData & 0xFFFF);
6431     p->figureOfMeritValues[3] = (NvU16) ((statData >> 16) & 0xFFFF);
6432 
6433     p->numLanes = nvswitch_get_sublink_width(device, p->linkId);
6434 
6435     return status;
6436 }
6437 
6438 void
6439 nvswitch_set_fatal_error_lr10
6440 (
6441     nvswitch_device *device,
6442     NvBool           device_fatal,
6443     NvU32            link_id
6444 )
6445 {
6446     NvU32 reg;
6447 
6448     NVSWITCH_ASSERT(link_id < nvswitch_get_num_links(device));
6449 
6450     // On first fatal error, notify PORT_DOWN
6451     if (!device->link[link_id].fatal_error_occurred)
6452     {
6453         if (nvswitch_lib_notify_client_events(device,
6454                     NVSWITCH_DEVICE_EVENT_PORT_DOWN) != NVL_SUCCESS)
6455         {
6456             NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify PORT_DOWN event\n",
6457                          __FUNCTION__);
6458         }
6459     }
6460 
6461     device->link[link_id].fatal_error_occurred = NV_TRUE;
6462 
6463     if (device_fatal)
6464     {
6465         reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12);
6466         reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_RESET_REQUIRED,
6467                               1, reg);
6468 
6469         NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _SW_SCRATCH_12, reg);
6470     }
6471     else
6472     {
6473         reg = NVSWITCH_LINK_RD32_LR10(device, link_id, NPORT, _NPORT, _SCRATCH_WARM);
6474         reg = FLD_SET_DRF_NUM(_NPORT, _SCRATCH_WARM, _PORT_RESET_REQUIRED,
6475                               1, reg);
6476 
6477         NVSWITCH_LINK_WR32_LR10(device, link_id, NPORT, _NPORT, _SCRATCH_WARM, reg);
6478     }
6479 }
6480 
6481 static NvU32
6482 nvswitch_get_latency_sample_interval_msec_lr10
6483 (
6484     nvswitch_device *device
6485 )
6486 {
6487     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
6488     return chip_device->latency_stats->sample_interval_msec;
6489 }
6490 
// Swap clock default is not supported on LR10.
// NOTE(review): the negative error code is implicitly converted to the
// unsigned NvU32 return type here; presumably callers compare against the
// same converted value -- confirm against call sites before changing.
NvU32
nvswitch_get_swap_clk_default_lr10
(
    nvswitch_device *device
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
6499 
6500 NvBool
6501 nvswitch_is_link_in_use_lr10
6502 (
6503     nvswitch_device *device,
6504     NvU32 link_id
6505 )
6506 {
6507     NvU32 data;
6508     nvlink_link *link;
6509 
6510     link = nvswitch_get_link(device, link_id);
6511     if (link == NULL)
6512     {
6513         // A query on an invalid link should never occur
6514         NVSWITCH_ASSERT(link != NULL);
6515         return NV_FALSE;
6516     }
6517 
6518     if (nvswitch_is_link_in_reset(device, link))
6519     {
6520         return NV_FALSE;
6521     }
6522 
6523     data = NVSWITCH_LINK_RD32_LR10(device, link_id,
6524                                    NVLDL, _NVLDL_TOP, _LINK_STATE);
6525 
6526     return (DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, data) !=
6527             NV_NVLDL_TOP_LINK_STATE_STATE_INIT);
6528 }
6529 
6530 static NvU32
6531 nvswitch_get_device_dma_width_lr10
6532 (
6533     nvswitch_device *device
6534 )
6535 {
6536     return DMA_ADDR_WIDTH_LR10;
6537 }
6538 
6539 NvU32
6540 nvswitch_get_link_ip_version_lr10
6541 (
6542     nvswitch_device *device,
6543     NvU32            link_id
6544 )
6545 {
6546     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
6547     NvU32 nvldl_instance;
6548 
6549     nvldl_instance = NVSWITCH_GET_LINK_ENG_INST(device, link_id, NVLDL);
6550     if (NVSWITCH_ENG_IS_VALID(device, NVLDL, nvldl_instance))
6551     {
6552         return chip_device->engNVLDL[nvldl_instance].version;
6553     }
6554     else
6555     {
6556         NVSWITCH_PRINT(device, ERROR,
6557             "%s: NVLink[0x%x] NVLDL instance invalid\n",
6558             __FUNCTION__, link_id);
6559         return 0;
6560     }
6561 }
6562 
6563 static NvlStatus
6564 nvswitch_test_soe_dma_lr10
6565 (
6566     nvswitch_device *device
6567 )
6568 {
6569     return soeTestDma_HAL(device, (PSOE)device->pSoe);
6570 }
6571 
//
// Read the reserved (driver-monitored) NVLTLC throughput counters for one
// link.  For each counter type set in counter_mask, reads the matching
// 64-bit HW counter and stores it at counter_values[bit_index].
//
// Returns -NVL_ERR_NOT_SUPPORTED if the mask contains an unknown counter
// type; NVL_SUCCESS otherwise.
//
static NvlStatus
_nvswitch_get_reserved_throughput_counters
(
    nvswitch_device *device,
    nvlink_link     *link,
    NvU16           counter_mask,
    NvU64           *counter_values
)
{
    NvU16 counter = 0;

    //
    // LR10 to use counters 0 & 2 for monitoring
    // (Same as GPU behavior)
    // Counter 0 counts data flits
    // Counter 2 counts all flits
    //
    FOR_EACH_INDEX_IN_MASK(16, counter, counter_mask)
    {
        // counter is a bit index; counter_type is the corresponding mask bit.
        NvU32 counter_type = NVBIT(counter);
        NvU64 data = 0;

        switch (counter_type)
        {
            // Data flits transmitted: TX counter 0.
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_TX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(0)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(0)));
                break;
            }
            // Data flits received: RX counter 0.
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_RX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(0)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(0)));
                break;
            }
            // All flits transmitted: TX counter 2.
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_RAW_TX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(2)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(2)));
                break;
            }
            // All flits received: RX counter 2.
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_RAW_RX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(2)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(2)));
                break;
            }
            default:
            {
                return -NVL_ERR_NOT_SUPPORTED;
            }
        }
        counter_values[counter] = data;
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}
6643 
6644 NvlStatus
6645 nvswitch_ctrl_get_throughput_counters_lr10
6646 (
6647     nvswitch_device *device,
6648     NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *p
6649 )
6650 {
6651     NvlStatus status;
6652     nvlink_link *link;
6653     NvU16 i = 0;
6654 
6655     nvswitch_os_memset(p->counters, 0, sizeof(p->counters));
6656 
6657     FOR_EACH_INDEX_IN_MASK(64, i, p->linkMask)
6658     {
6659         link = nvswitch_get_link(device, i);
6660         if ((link == NULL) || (link->linkNumber >= NVSWITCH_MAX_PORTS) ||
6661             (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLTLC, link->linkNumber)))
6662         {
6663             continue;
6664         }
6665 
6666         status = _nvswitch_get_reserved_throughput_counters(device, link, p->counterMask,
6667                         p->counters[link->linkNumber].values);
6668         if (status != NVL_SUCCESS)
6669         {
6670             NVSWITCH_PRINT(device, ERROR,
6671                 "Failed to get reserved NVLINK throughput counters on link %d\n",
6672                 link->linkNumber);
6673             return status;
6674         }
6675     }
6676     FOR_EACH_INDEX_IN_MASK_END;
6677 
6678     return NVL_SUCCESS;
6679 }
6680 
6681 static NvBool
6682 nvswitch_is_soe_supported_lr10
6683 (
6684     nvswitch_device *device
6685 )
6686 {
6687     return NV_TRUE;
6688 }
6689 
6690 NvBool
6691 nvswitch_is_inforom_supported_lr10
6692 (
6693     nvswitch_device *device
6694 )
6695 {
6696     if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
6697     {
6698         NVSWITCH_PRINT(device, INFO,
6699             "INFOROM is not supported on non-silicon platform\n");
6700         return NV_FALSE;
6701     }
6702 
6703     if (!nvswitch_is_soe_supported(device))
6704     {
6705         NVSWITCH_PRINT(device, INFO,
6706             "INFOROM is not supported since SOE is not supported\n");
6707         return NV_FALSE;
6708     }
6709 
6710     return NV_TRUE;
6711 }
6712 
6713 NvBool
6714 nvswitch_is_spi_supported_lr10
6715 (
6716     nvswitch_device *device
6717 )
6718 {
6719     if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
6720     {
6721         NVSWITCH_PRINT(device, INFO,
6722             "SPI is not supported on non-silicon platforms\n");
6723         return NV_FALSE;
6724     }
6725 
6726     if (!nvswitch_is_soe_supported(device))
6727     {
6728         NVSWITCH_PRINT(device, INFO,
6729             "SPI is not supported since SOE is not supported\n");
6730         return NV_FALSE;
6731     }
6732 
6733     return NV_TRUE;
6734 }
6735 
6736 NvBool
6737 nvswitch_is_smbpbi_supported_lr10
6738 (
6739     nvswitch_device *device
6740 )
6741 {
6742     if (IS_RTLSIM(device) || IS_FMODEL(device))
6743     {
6744         NVSWITCH_PRINT(device, INFO,
6745             "SMBPBI is not supported on RTLSIM/FMODEL platforms\n");
6746         return NV_FALSE;
6747     }
6748 
6749     if (!nvswitch_is_soe_supported(device))
6750     {
6751         NVSWITCH_PRINT(device, INFO,
6752             "SMBPBI is not supported since SOE is not supported\n");
6753         return NV_FALSE;
6754     }
6755 
6756     return NV_TRUE;
6757 }
6758 
6759 /*
6760  * @Brief : Additional setup needed after device initialization
6761  *
6762  * @Description :
6763  *
6764  * @param[in] device        a reference to the device to initialize
6765  */
6766 NvlStatus
6767 nvswitch_post_init_device_setup_lr10
6768 (
6769     nvswitch_device *device
6770 )
6771 {
6772     NvlStatus retval;
6773 
6774     if (device->regkeys.soe_dma_self_test ==
6775             NV_SWITCH_REGKEY_SOE_DMA_SELFTEST_DISABLE)
6776     {
6777         NVSWITCH_PRINT(device, INFO,
6778             "Skipping SOE DMA selftest as requested using regkey\n");
6779     }
6780     else if (IS_RTLSIM(device) || IS_FMODEL(device))
6781     {
6782         NVSWITCH_PRINT(device, SETUP,
6783             "Skipping DMA selftest on FMODEL/RTLSIM platforms\n");
6784     }
6785     else if (!nvswitch_is_soe_supported(device))
6786     {
6787         NVSWITCH_PRINT(device, SETUP,
6788             "Skipping DMA selftest since SOE is not supported\n");
6789     }
6790     else
6791     {
6792         retval = nvswitch_test_soe_dma_lr10(device);
6793         if (retval != NVL_SUCCESS)
6794         {
6795             return retval;
6796         }
6797     }
6798 
6799     if (nvswitch_is_inforom_supported(device))
6800     {
6801         nvswitch_inforom_post_init(device);
6802     }
6803     else
6804     {
6805         NVSWITCH_PRINT(device, SETUP, "Skipping INFOROM init\n");
6806     }
6807 
6808     nvswitch_soe_init_l2_state(device);
6809 
6810     return NVL_SUCCESS;
6811 }
6812 
6813 /*
6814  * @Brief : Additional setup needed after blacklisted device initialization
6815  *
6816  * @Description :
6817  *
6818  * @param[in] device        a reference to the device to initialize
6819  */
6820 void
6821 nvswitch_post_init_blacklist_device_setup_lr10
6822 (
6823     nvswitch_device *device
6824 )
6825 {
6826     NvlStatus status;
6827 
6828     if (nvswitch_is_inforom_supported(device))
6829     {
6830         nvswitch_inforom_post_init(device);
6831     }
6832 
6833     //
6834     // Initialize the driver state monitoring callback.
6835     // This is still needed for SOE to report correct driver state.
6836     //
6837     status = nvswitch_smbpbi_post_init(device);
6838     if (status != NVL_SUCCESS)
6839     {
6840         NVSWITCH_PRINT(device, ERROR, "Smbpbi post init failed, rc:%d\n",
6841                        status);
6842         return;
6843     }
6844 
6845     //
6846     // This internally will only flush if OMS value has changed
6847     //
6848     status = device->hal.nvswitch_oms_inforom_flush(device);
6849     if (status != NVL_SUCCESS)
6850     {
6851         NVSWITCH_PRINT(device, ERROR, "Flushing OMS failed, rc:%d\n",
6852                        status);
6853         return;
6854     }
6855 }
6856 
6857 void
6858 nvswitch_load_uuid_lr10
6859 (
6860     nvswitch_device *device
6861 )
6862 {
6863     NvU32 regData[4];
6864 
6865     //
6866     // Read 128-bit UUID from secure scratch registers which must be
6867     // populated by firmware.
6868     //
6869     regData[0] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_8);
6870     regData[1] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_9);
6871     regData[2] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_10);
6872     regData[3] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_11);
6873 
6874     nvswitch_os_memcpy(&device->uuid.uuid, (NvU8 *)regData, NV_UUID_LEN);
6875 }
6876 
6877 NvlStatus
6878 nvswitch_read_oob_blacklist_state_lr10
6879 (
6880     nvswitch_device *device
6881 )
6882 {
6883     NvU32 reg;
6884     NvBool is_oob_blacklist;
6885     NvlStatus status;
6886 
6887     if (device == NULL)
6888     {
6889         NVSWITCH_PRINT(device, ERROR, "%s: Called with invalid argument\n", __FUNCTION__);
6890         return -NVL_BAD_ARGS;
6891     }
6892 
6893     reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SCRATCH_COLD);
6894 
6895     // Check for uninitialized SCRATCH_COLD before declaring the device blacklisted
6896     if (reg == NV_NVLSAW_SCRATCH_COLD_DATA_INIT)
6897         is_oob_blacklist = NV_FALSE;
6898     else
6899         is_oob_blacklist = DRF_VAL(_NVLSAW, _SCRATCH_COLD, _OOB_BLACKLIST_DEVICE_REQUESTED, reg);
6900 
6901     status = nvswitch_inforom_oms_set_device_disable(device, is_oob_blacklist);
6902     if (status != NVL_SUCCESS)
6903     {
6904         NVSWITCH_PRINT(device, ERROR,
6905             "Failed to set device disable to %d, rc:%d\n",
6906             is_oob_blacklist, status);
6907     }
6908 
6909     if (is_oob_blacklist)
6910     {
6911         device->device_fabric_state = NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED;
6912         device->device_blacklist_reason = NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_OUT_OF_BAND;
6913     }
6914 
6915     return NVL_SUCCESS;
6916 }
6917 
6918 NvlStatus
6919 nvswitch_write_fabric_state_lr10
6920 (
6921     nvswitch_device *device
6922 )
6923 {
6924     NvU32 reg;
6925 
6926     if (device == NULL)
6927     {
6928         NVSWITCH_PRINT(device, ERROR, "%s: Called with invalid argument\n", __FUNCTION__);
6929         return -NVL_BAD_ARGS;
6930     }
6931 
6932     // bump the sequence number for each write
6933     device->fabric_state_sequence_number++;
6934 
6935     reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12);
6936 
6937     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_BLACKLIST_REASON,
6938                           device->device_blacklist_reason, reg);
6939     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_FABRIC_STATE,
6940                           device->device_fabric_state, reg);
6941     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DRIVER_FABRIC_STATE,
6942                           device->driver_fabric_state, reg);
6943     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _EVENT_MESSAGE_COUNT,
6944                           device->fabric_state_sequence_number, reg);
6945 
6946     NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _SW_SCRATCH_12, reg);
6947 
6948     return NVL_SUCCESS;
6949 }
6950 
6951 static NVSWITCH_ENGINE_DESCRIPTOR_TYPE *
6952 _nvswitch_get_eng_descriptor_lr10
6953 (
6954     nvswitch_device *device,
6955     NVSWITCH_ENGINE_ID eng_id
6956 )
6957 {
6958     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
6959     NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine = NULL;
6960 
6961     if (eng_id >= NVSWITCH_ENGINE_ID_SIZE)
6962     {
6963         NVSWITCH_PRINT(device, ERROR,
6964             "%s: Engine_ID 0x%x out of range 0..0x%x\n",
6965             __FUNCTION__,
6966             eng_id, NVSWITCH_ENGINE_ID_SIZE-1);
6967         return NULL;
6968     }
6969 
6970     engine = &(chip_device->io.common[eng_id]);
6971     NVSWITCH_ASSERT(eng_id == engine->eng_id);
6972 
6973     return engine;
6974 }
6975 
6976 NvU32
6977 nvswitch_get_eng_base_lr10
6978 (
6979     nvswitch_device *device,
6980     NVSWITCH_ENGINE_ID eng_id,
6981     NvU32 eng_bcast,
6982     NvU32 eng_instance
6983 )
6984 {
6985     NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine;
6986     NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID;
6987 
6988     engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
6989     if (engine == NULL)
6990     {
6991         NVSWITCH_PRINT(device, ERROR,
6992             "%s: ID 0x%x[%d] %s not found\n",
6993             __FUNCTION__,
6994             eng_id, eng_instance,
6995             (
6996                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
6997                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
6998                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
6999                 "??"
7000             ));
7001         return NVSWITCH_BASE_ADDR_INVALID;
7002     }
7003 
7004     if ((eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) &&
7005         (eng_instance < engine->eng_count))
7006     {
7007         base_addr = engine->uc_addr[eng_instance];
7008     }
7009     else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST)
7010     {
7011         base_addr = engine->bc_addr;
7012     }
7013     else if ((eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) &&
7014         (eng_instance < engine->mc_addr_count))
7015     {
7016         base_addr = engine->mc_addr[eng_instance];
7017     }
7018     else
7019     {
7020         NVSWITCH_PRINT(device, ERROR,
7021             "%s: Unknown address space type 0x%x (not UC, BC, or MC)\n",
7022             __FUNCTION__,
7023             eng_bcast);
7024     }
7025 
7026     if (base_addr == NVSWITCH_BASE_ADDR_INVALID)
7027     {
7028         NVSWITCH_PRINT(device, ERROR,
7029             "%s: ID 0x%x[%d] %s invalid address\n",
7030             __FUNCTION__,
7031             eng_id, eng_instance,
7032             (
7033                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7034                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7035                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7036                 "??"
7037             ));
7038     }
7039 
7040     return base_addr;
7041 }
7042 
7043 NvU32
7044 nvswitch_get_eng_count_lr10
7045 (
7046     nvswitch_device *device,
7047     NVSWITCH_ENGINE_ID eng_id,
7048     NvU32 eng_bcast
7049 )
7050 {
7051     NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine;
7052     NvU32 eng_count = 0;
7053 
7054     engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
7055     if (engine == NULL)
7056     {
7057         NVSWITCH_PRINT(device, ERROR,
7058             "%s: ID 0x%x %s not found\n",
7059             __FUNCTION__,
7060             eng_id,
7061             (
7062                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7063                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7064                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7065                 "??"
7066             ));
7067         return 0;
7068     }
7069 
7070     if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST)
7071     {
7072         eng_count = engine->eng_count;
7073     }
7074     else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST)
7075     {
7076         if (engine->bc_addr == NVSWITCH_BASE_ADDR_INVALID)
7077         {
7078             eng_count = 0;
7079         }
7080         else
7081         {
7082             eng_count = 1;
7083         }
7084     }
7085     else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST)
7086     {
7087         eng_count = engine->mc_addr_count;
7088     }
7089     else
7090     {
7091         NVSWITCH_PRINT(device, ERROR,
7092             "%s: Unknown address space type 0x%x (not UC, BC, or MC)\n",
7093             __FUNCTION__,
7094             eng_bcast);
7095     }
7096 
7097     return eng_count;
7098 }
7099 
7100 NvU32
7101 nvswitch_eng_rd_lr10
7102 (
7103     nvswitch_device *device,
7104     NVSWITCH_ENGINE_ID eng_id,
7105     NvU32 eng_bcast,
7106     NvU32 eng_instance,
7107     NvU32 offset
7108 )
7109 {
7110     NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID;
7111     NvU32 data;
7112 
7113     base_addr = nvswitch_get_eng_base_lr10(device, eng_id, eng_bcast, eng_instance);
7114     if (base_addr == NVSWITCH_BASE_ADDR_INVALID)
7115     {
7116         NVSWITCH_PRINT(device, ERROR,
7117             "%s: ID 0x%x[%d] %s invalid address\n",
7118             __FUNCTION__,
7119             eng_id, eng_instance,
7120             (
7121                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7122                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7123                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7124                 "??"
7125             ));
7126         NVSWITCH_ASSERT(base_addr != NVSWITCH_BASE_ADDR_INVALID);
7127         return 0xBADFBADF;
7128     }
7129 
7130     data = nvswitch_reg_read_32(device, base_addr + offset);
7131 
7132 #if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7133     {
7134         NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
7135 
7136         NVSWITCH_PRINT(device, MMIO,
7137             "%s: ENG_RD %s(0x%x)[%d] @0x%08x+0x%06x = 0x%08x\n",
7138             __FUNCTION__,
7139             engine->eng_name, engine->eng_id,
7140             eng_instance,
7141             base_addr, offset,
7142             data);
7143     }
7144 #endif  //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7145 
7146     return data;
7147 }
7148 
7149 void
7150 nvswitch_eng_wr_lr10
7151 (
7152     nvswitch_device *device,
7153     NVSWITCH_ENGINE_ID eng_id,
7154     NvU32 eng_bcast,
7155     NvU32 eng_instance,
7156     NvU32 offset,
7157     NvU32 data
7158 )
7159 {
7160     NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID;
7161 
7162     base_addr = nvswitch_get_eng_base_lr10(device, eng_id, eng_bcast, eng_instance);
7163     if (base_addr == NVSWITCH_BASE_ADDR_INVALID)
7164     {
7165         NVSWITCH_PRINT(device, ERROR,
7166             "%s: ID 0x%x[%d] %s invalid address\n",
7167             __FUNCTION__,
7168             eng_id, eng_instance,
7169             (
7170                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7171                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7172                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7173                 "??"
7174             ));
7175         NVSWITCH_ASSERT(base_addr != NVSWITCH_BASE_ADDR_INVALID);
7176         return;
7177     }
7178 
7179     nvswitch_reg_write_32(device, base_addr + offset,  data);
7180 
7181 #if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7182     {
7183         NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
7184 
7185         NVSWITCH_PRINT(device, MMIO,
7186             "%s: ENG_WR %s(0x%x)[%d] @0x%08x+0x%06x = 0x%08x\n",
7187             __FUNCTION__,
7188             engine->eng_name, engine->eng_id,
7189             eng_instance,
7190             base_addr, offset,
7191             data);
7192     }
7193 #endif  //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7194 }
7195 
7196 NvU32
7197 nvswitch_get_link_eng_inst_lr10
7198 (
7199     nvswitch_device *device,
7200     NvU32 link_id,
7201     NVSWITCH_ENGINE_ID eng_id
7202 )
7203 {
7204     NvU32   eng_instance = NVSWITCH_ENGINE_INSTANCE_INVALID;
7205 
7206     if (link_id >= NVSWITCH_LINK_COUNT(device))
7207     {
7208         NVSWITCH_PRINT(device, ERROR,
7209             "%s: link ID 0x%x out-of-range [0x0..0x%x]\n",
7210             __FUNCTION__,
7211             link_id, NVSWITCH_LINK_COUNT(device)-1);
7212         return NVSWITCH_ENGINE_INSTANCE_INVALID;
7213     }
7214 
7215     switch (eng_id)
7216     {
7217         case NVSWITCH_ENGINE_ID_NPG:
7218             eng_instance = link_id / NVSWITCH_LINKS_PER_NPG;
7219             break;
7220         case NVSWITCH_ENGINE_ID_NVLIPT:
7221             eng_instance = link_id / NVSWITCH_LINKS_PER_NVLIPT;
7222             break;
7223         case NVSWITCH_ENGINE_ID_NVLW:
7224         case NVSWITCH_ENGINE_ID_NVLW_PERFMON:
7225             eng_instance = link_id / NVSWITCH_LINKS_PER_NVLW;
7226             break;
7227         case NVSWITCH_ENGINE_ID_MINION:
7228             eng_instance = link_id / NVSWITCH_LINKS_PER_MINION;
7229             break;
7230         case NVSWITCH_ENGINE_ID_NPORT:
7231         case NVSWITCH_ENGINE_ID_NVLTLC:
7232         case NVSWITCH_ENGINE_ID_NVLDL:
7233         case NVSWITCH_ENGINE_ID_NVLIPT_LNK:
7234         case NVSWITCH_ENGINE_ID_NPORT_PERFMON:
7235             eng_instance = link_id;
7236             break;
7237         default:
7238             NVSWITCH_PRINT(device, ERROR,
7239                 "%s: link ID 0x%x has no association with EngID 0x%x\n",
7240                 __FUNCTION__,
7241                 link_id, eng_id);
7242             eng_instance = NVSWITCH_ENGINE_INSTANCE_INVALID;
7243             break;
7244     }
7245 
7246     return eng_instance;
7247 }
7248 
7249 NvU32
7250 nvswitch_get_caps_nvlink_version_lr10
7251 (
7252     nvswitch_device *device
7253 )
7254 {
7255     ct_assert(NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_3_0 ==
7256                 NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0);
7257     return NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0;
7258 }
7259 
7260 NVSWITCH_BIOS_NVLINK_CONFIG *
7261 nvswitch_get_bios_nvlink_config_lr10
7262 (
7263     nvswitch_device *device
7264 )
7265 {
7266     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
7267 
7268     return (chip_device != NULL) ? &chip_device->bios_config : NULL;
7269 }
7270 
7271 /*
7272  * CTRL_NVSWITCH_SET_RESIDENCY_BINS
7273  */
7274 static NvlStatus
7275 nvswitch_ctrl_set_residency_bins_lr10
7276 (
7277     nvswitch_device *device,
7278     NVSWITCH_SET_RESIDENCY_BINS *p
7279 )
7280 {
7281     NVSWITCH_PRINT(device, ERROR,
7282         "SET_RESIDENCY_BINS should not be called on LR10\n");
7283     return -NVL_ERR_NOT_SUPPORTED;
7284 }
7285 
7286 /*
7287  * CTRL_NVSWITCH_GET_RESIDENCY_BINS
7288  */
7289 static NvlStatus
7290 nvswitch_ctrl_get_residency_bins_lr10
7291 (
7292     nvswitch_device *device,
7293     NVSWITCH_GET_RESIDENCY_BINS *p
7294 )
7295 {
7296     NVSWITCH_PRINT(device, ERROR,
7297         "GET_RESIDENCY_BINS should not be called on LR10\n");
7298     return -NVL_ERR_NOT_SUPPORTED;
7299 }
7300 
7301 /*
7302  * CTRL_NVSWITCH_GET_RB_STALL_BUSY
7303  */
7304 static NvlStatus
7305 nvswitch_ctrl_get_rb_stall_busy_lr10
7306 (
7307     nvswitch_device *device,
7308     NVSWITCH_GET_RB_STALL_BUSY *p
7309 )
7310 {
7311     NVSWITCH_PRINT(device, ERROR,
7312         "GET_RB_STALL_BUSY should not be called on LR10\n");
7313     return -NVL_ERR_NOT_SUPPORTED;
7314 }
7315 
7316 /*
7317  * CTRL_NVSWITCH_GET_MULTICAST_ID_ERROR_VECTOR
7318  */
7319 static NvlStatus
7320 nvswitch_ctrl_get_multicast_id_error_vector_lr10
7321 (
7322     nvswitch_device *device,
7323     NVSWITCH_GET_MULTICAST_ID_ERROR_VECTOR *p
7324 )
7325 {
7326     NVSWITCH_PRINT(device, ERROR,
7327         "GET_MULTICAST_ID_ERROR_VECTOR should not be called on LR10\n");
7328     return -NVL_ERR_NOT_SUPPORTED;
7329 }
7330 
7331 /*
7332  * CTRL_NVSWITCH_CLEAR_MULTICAST_ID_ERROR_VECTOR
7333  */
7334 static NvlStatus
7335 nvswitch_ctrl_clear_multicast_id_error_vector_lr10
7336 (
7337     nvswitch_device *device,
7338     NVSWITCH_CLEAR_MULTICAST_ID_ERROR_VECTOR *p
7339 )
7340 {
7341     NVSWITCH_PRINT(device, ERROR,
7342         "CLEAR_MULTICAST_ID_ERROR_VECTOR should not be called on LR10\n");
7343     return -NVL_ERR_NOT_SUPPORTED;
7344 }
7345 
7346 void
7347 nvswitch_send_inband_nack_lr10
7348 (
7349     nvswitch_device *device,
7350     NvU32 *msghdr,
7351     NvU32  linkId
7352 )
7353 {
7354     return;
7355 }
7356 
7357 NvU32
7358 nvswitch_get_max_persistent_message_count_lr10
7359 (
7360     nvswitch_device *device
7361 )
7362 {
7363     return 0;
7364 }
7365 
7366 /*
7367  * CTRL_NVSWITCH_INBAND_SEND_DATA
7368  */
7369 NvlStatus
7370 nvswitch_ctrl_inband_send_data_lr10
7371 (
7372     nvswitch_device *device,
7373     NVSWITCH_INBAND_SEND_DATA_PARAMS *p
7374 )
7375 {
7376     return -NVL_ERR_NOT_SUPPORTED;
7377 }
7378 
7379 /*
7380  * CTRL_NVSWITCH_INBAND_RECEIVE_DATA
7381  */
7382 NvlStatus
7383 nvswitch_ctrl_inband_read_data_lr10
7384 (
7385     nvswitch_device *device,
7386     NVSWITCH_INBAND_READ_DATA_PARAMS *p
7387 )
7388 {
7389     return -NVL_ERR_NOT_SUPPORTED;
7390 }
7391 
7392 /*
7393  * CTRL_NVSWITCH_GET_BOARD_PART_NUMBER
7394  */
7395 NvlStatus
7396 nvswitch_ctrl_get_board_part_number_lr10
7397 (
7398     nvswitch_device *device,
7399     NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p
7400 )
7401 {
7402     struct inforom *pInforom = device->pInforom;
7403     INFOROM_OBD_OBJECT_V1_XX *pOBDObj;
7404     int byteIdx;
7405 
7406     if (pInforom == NULL)
7407     {
7408         return -NVL_ERR_NOT_SUPPORTED;
7409     }
7410 
7411     if (!pInforom->OBD.bValid)
7412     {
7413         NVSWITCH_PRINT(device, ERROR, "OBD data is not available\n");
7414         return -NVL_ERR_GENERIC;
7415     }
7416 
7417     pOBDObj = &pInforom->OBD.object.v1;
7418 
7419     if (sizeof(p->data) != sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008))
7420     {
7421         NVSWITCH_PRINT(device, ERROR,
7422                        "board part number available size %lu is not same as the request size %lu\n",
7423                        sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008), sizeof(p->data));
7424         return -NVL_ERR_GENERIC;
7425     }
7426 
7427     nvswitch_os_memset(p, 0, sizeof(NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR));
7428 
7429     /* Copy board type data */
7430     for (byteIdx = 0; byteIdx < NVSWITCH_BOARD_PART_NUMBER_SIZE_IN_BYTES; byteIdx++)
7431     {
7432         p->data[byteIdx] =(NvU8)(pOBDObj->productPartNumber[byteIdx] & 0xFF);
7433     }
7434 
7435     return NVL_SUCCESS;
7436 }
7437 
7438 /*
7439 * @brief: This function retrieves the NVLIPT public ID for a given global link idx
7440 * @params[in]  device        reference to current nvswitch device
7441 * @params[in]  linkId        link to retrieve NVLIPT public ID from
7442 * @params[out] publicId      Public ID of NVLIPT owning linkId
7443 */
7444 NvlStatus nvswitch_get_link_public_id_lr10
7445 (
7446     nvswitch_device *device,
7447     NvU32 linkId,
7448     NvU32 *publicId
7449 )
7450 {
7451     if (!device->hal.nvswitch_is_link_valid(device, linkId) ||
7452         (publicId == NULL))
7453     {
7454         return -NVL_BAD_ARGS;
7455     }
7456 
7457     *publicId = NVSWITCH_NVLIPT_GET_PUBLIC_ID_LR10(linkId);
7458 
7459 
7460     return (NVSWITCH_ENG_VALID_LR10(device, NVLIPT, *publicId)) ?
7461                 NVL_SUCCESS : -NVL_BAD_ARGS;
7462 }
7463 
7464 /*
7465 * @brief: This function retrieves the internal link idx for a given global link idx
7466 * @params[in]  device        reference to current nvswitch device
7467 * @params[in]  linkId        link to retrieve NVLIPT public ID from
7468 * @params[out] localLinkIdx  Internal link index of linkId
7469 */
7470 NvlStatus nvswitch_get_link_local_idx_lr10
7471 (
7472     nvswitch_device *device,
7473     NvU32 linkId,
7474     NvU32 *localLinkIdx
7475 )
7476 {
7477     if (!device->hal.nvswitch_is_link_valid(device, linkId) ||
7478         (localLinkIdx == NULL))
7479     {
7480         return -NVL_BAD_ARGS;
7481     }
7482 
7483     *localLinkIdx = NVSWITCH_NVLIPT_GET_LOCAL_LINK_ID_LR10(linkId);
7484 
7485     return NVL_SUCCESS;
7486 }
7487 
7488 NvlStatus nvswitch_set_training_error_info_lr10
7489 (
7490     nvswitch_device *device,
7491     NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS *pLinkTrainingErrorInfoParams
7492 )
7493 {
7494     NVSWITCH_LINK_TRAINING_ERROR_INFO linkTrainingErrorInfo;
7495     NVSWITCH_LINK_RUNTIME_ERROR_INFO linkRuntimeErrorInfo;
7496 
7497     linkTrainingErrorInfo.isValid = NV_TRUE;
7498     linkTrainingErrorInfo.attemptedTrainingMask0 =
7499         pLinkTrainingErrorInfoParams->attemptedTrainingMask0;
7500     linkTrainingErrorInfo.trainingErrorMask0 =
7501         pLinkTrainingErrorInfoParams->trainingErrorMask0;
7502 
7503     linkRuntimeErrorInfo.isValid = NV_FALSE;
7504     linkRuntimeErrorInfo.mask0   = 0;
7505 
7506     return nvswitch_smbpbi_set_link_error_info(device,
7507                                                &linkTrainingErrorInfo,
7508                                                &linkRuntimeErrorInfo);
7509 }
7510 
7511 NvlStatus nvswitch_ctrl_get_fatal_error_scope_lr10
7512 (
7513     nvswitch_device *device,
7514     NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS *pParams
7515 )
7516 {
7517     NvU32 linkId;
7518     NvU32 reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12);
7519     pParams->device = FLD_TEST_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_RESET_REQUIRED,
7520                                        1, reg);
7521 
7522     for (linkId = 0; linkId < NVSWITCH_MAX_PORTS; linkId++)
7523     {
7524         if (!nvswitch_is_link_valid(device, linkId))
7525         {
7526             pParams->port[linkId] = NV_FALSE;
7527             continue;
7528         }
7529 
7530         reg = NVSWITCH_LINK_RD32_LR10(device, linkId, NPORT, _NPORT, _SCRATCH_WARM);
7531         pParams->port[linkId] = FLD_TEST_DRF_NUM(_NPORT, _SCRATCH_WARM,
7532                                                  _PORT_RESET_REQUIRED, 1, reg);
7533     }
7534 
7535     return NVL_SUCCESS;
7536 }
7537 
7538 NvlStatus nvswitch_ctrl_set_mc_rid_table_lr10
7539 (
7540     nvswitch_device *device,
7541     NVSWITCH_SET_MC_RID_TABLE_PARAMS *p
7542 )
7543 {
7544     return -NVL_ERR_NOT_SUPPORTED;
7545 }
7546 
7547 NvlStatus nvswitch_ctrl_get_mc_rid_table_lr10
7548 (
7549     nvswitch_device *device,
7550     NVSWITCH_GET_MC_RID_TABLE_PARAMS *p
7551 )
7552 {
7553     return -NVL_ERR_NOT_SUPPORTED;
7554 }
7555 
7556 void nvswitch_init_scratch_lr10
7557 (
7558     nvswitch_device *device
7559 )
7560 {
7561     NvU32 linkId;
7562     NvU32 reg;
7563 
7564     for (linkId = 0; linkId < nvswitch_get_num_links(device); linkId++)
7565     {
7566         if (!nvswitch_is_link_valid(device, linkId))
7567         {
7568             continue;
7569         }
7570 
7571         reg = NVSWITCH_LINK_RD32(device, linkId, NPORT, _NPORT, _SCRATCH_WARM);
7572         if (reg == NV_NPORT_SCRATCH_WARM_DATA_INIT)
7573         {
7574             NVSWITCH_LINK_WR32(device, linkId, NPORT, _NPORT, _SCRATCH_WARM, 0);
7575         }
7576     }
7577 }
7578 
7579 NvlStatus
7580 nvswitch_launch_ALI_lr10
7581 (
7582     nvswitch_device *device
7583 )
7584 {
7585     return -NVL_ERR_NOT_SUPPORTED;
7586 }
7587 
7588 NvlStatus
7589 nvswitch_set_training_mode_lr10
7590 (
7591     nvswitch_device *device
7592 )
7593 {
7594     return NVL_SUCCESS;
7595 }
7596 
7597 NvlStatus
7598 nvswitch_parse_bios_image_lr10
7599 (
7600     nvswitch_device *device
7601 )
7602 {
7603     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config;
7604     NV_STATUS status = NV_OK;
7605 
7606     // check if spi is supported
7607     if (!nvswitch_is_spi_supported(device))
7608     {
7609         NVSWITCH_PRINT(device, ERROR,
7610                 "%s: SPI is not supported\n",
7611                 __FUNCTION__);
7612         return -NVL_ERR_NOT_SUPPORTED;
7613     }
7614 
7615     bios_config = nvswitch_get_bios_nvlink_config(device);
7616 
7617     // Parse and retrieve the VBIOS info
7618     status = _nvswitch_setup_link_vbios_overrides(device, bios_config);
7619     if ((status != NV_OK) && device->pSoe)
7620     {
7621         //To enable LS10 bringup (VBIOS is not ready and SOE is disabled), fail the device init only when SOE is enabled and vbios overrides has failed
7622         NVSWITCH_PRINT(device, ERROR,
7623                 "%s: error=0x%x\n",
7624                 __FUNCTION__, status);
7625 
7626         return -NVL_ERR_GENERIC;
7627     }
7628 
7629     return NVL_SUCCESS;
7630 }
7631 
7632 NvlStatus
7633 nvswitch_ctrl_get_nvlink_lp_counters_lr10
7634 (
7635     nvswitch_device *device,
7636     NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS *params
7637 )
7638 {
7639     return -NVL_ERR_NOT_SUPPORTED;
7640 }
7641 
7642 NvlStatus
7643 nvswitch_ctrl_get_sw_info_lr10
7644 (
7645     nvswitch_device *device,
7646     NVSWITCH_GET_SW_INFO_PARAMS *p
7647 )
7648 {
7649     NvlStatus retval = NVL_SUCCESS;
7650     NvU32 i;
7651 
7652     if (p->count > NVSWITCH_GET_SW_INFO_COUNT_MAX)
7653     {
7654         NVSWITCH_PRINT(device, ERROR,
7655             "%s: Invalid args\n",
7656             __FUNCTION__);
7657         return -NVL_BAD_ARGS;
7658     }
7659 
7660     nvswitch_os_memset(p->info, 0, sizeof(NvU32)*NVSWITCH_GET_SW_INFO_COUNT_MAX);
7661 
7662     for (i = 0; i < p->count; i++)
7663     {
7664         switch (p->index[i])
7665         {
7666             case NVSWITCH_GET_SW_INFO_INDEX_INFOROM_NVL_SUPPORTED:
7667                 p->info[i] = (NvU32)_nvswitch_inforom_nvl_supported(device);
7668                 break;
7669             case NVSWITCH_GET_SW_INFO_INDEX_INFOROM_BBX_SUPPORTED:
7670                 p->info[i] = (NvU32)_nvswitch_inforom_bbx_supported(device);
7671                 break;
7672             default:
7673                 NVSWITCH_PRINT(device, ERROR,
7674                     "%s: Undefined NVSWITCH_GET_SW_INFO_INDEX 0x%x\n",
7675                     __FUNCTION__,
7676                     p->index[i]);
7677                 retval = -NVL_BAD_ARGS;
7678                 break;
7679         }
7680     }
7681 
7682     return retval;
7683 }
7684 
/*
 * Populates per-link DL error/status info for every enabled link.
 * TL error registers do not exist on this device, so the TL fields are
 * reported as zero; the excess-DL-error interrupt is cleared when seen
 * (write-1-to-clear).
 */
NvlStatus
nvswitch_ctrl_get_err_info_lr10
(
    nvswitch_device *device,
    NVSWITCH_NVLINK_GET_ERR_INFO_PARAMS *ret
)
{
    nvlink_link *link;
    NvU32 data;
    NvU8 i;

     ret->linkMask = nvswitch_get_enabled_link_mask(device);

    FOR_EACH_INDEX_IN_MASK(64, i, ret->linkMask)
    {
        link = nvswitch_get_link(device, i);

        // Skip links with no NVLDL engine or outside the reportable range.
        if ((link == NULL) ||
            !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
            (i >= NVSWITCH_NVLINK_MAX_LINKS))
        {
            continue;
        }

        // TODO NVidia TL not supported
        NVSWITCH_PRINT(device, WARN,
            "%s WARNING: Nvidia %s register %s does not exist!\n",
            __FUNCTION__, "NVLTL", "NV_NVLTL_TL_ERRLOG_REG");

        NVSWITCH_PRINT(device, WARN,
            "%s WARNING: Nvidia %s register %s does not exist!\n",
            __FUNCTION__, "NVLTL", "NV_NVLTL_TL_INTEN_REG");

        // No TL registers on LR10 -> report zeros for the TL fields.
        ret->linkErrInfo[i].TLErrlog = 0x0;
        ret->linkErrInfo[i].TLIntrEn = 0x0;

        // TX/RX sub-link state machine primary states.
        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _SLSM_STATUS_TX);
        ret->linkErrInfo[i].DLSpeedStatusTx =
            DRF_VAL(_NVLDL_TX, _SLSM_STATUS_TX, _PRIMARY_STATE, data);

        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _SLSM_STATUS_RX);
        ret->linkErrInfo[i].DLSpeedStatusRx =
            DRF_VAL(_NVLDL_RX, _SLSM_STATUS_RX, _PRIMARY_STATE, data);

        // Excess-error-rate interrupt bit; nonzero means the DL saw too many
        // short-term RX errors.
        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _INTR);
        ret->linkErrInfo[i].bExcessErrorDL =
            !!DRF_VAL(_NVLDL_TOP, _INTR, _RX_SHORT_ERROR_RATE, data);

        if (ret->linkErrInfo[i].bExcessErrorDL)
        {
            // Clear the pending bit (write-1-to-clear) now that it is reported.
            NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _INTR,
                DRF_NUM(_NVLDL_TOP, _INTR, _RX_SHORT_ERROR_RATE, 0x1));
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}
7743 
7744 static NvlStatus
7745 nvswitch_ctrl_clear_counters_lr10
7746 (
7747     nvswitch_device *device,
7748     NVSWITCH_NVLINK_CLEAR_COUNTERS_PARAMS *ret
7749 )
7750 {
7751     nvlink_link *link;
7752     NvU8 i;
7753     NvU32 counterMask;
7754     NvlStatus status = NVL_SUCCESS;
7755 
7756     counterMask = ret->counterMask;
7757 
7758     // Common usage allows one of these to stand for all of them
7759     if ((counterMask) & ( NVSWITCH_NVLINK_COUNTER_TL_TX0
7760                         | NVSWITCH_NVLINK_COUNTER_TL_TX1
7761                         | NVSWITCH_NVLINK_COUNTER_TL_RX0
7762                         | NVSWITCH_NVLINK_COUNTER_TL_RX1
7763                         ))
7764     {
7765         counterMask |= ( NVSWITCH_NVLINK_COUNTER_TL_TX0
7766                        | NVSWITCH_NVLINK_COUNTER_TL_TX1
7767                        | NVSWITCH_NVLINK_COUNTER_TL_RX0
7768                        | NVSWITCH_NVLINK_COUNTER_TL_RX1
7769                        );
7770     }
7771 
7772     // Common usage allows one of these to stand for all of them
7773     if ((counterMask) & ( NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT
7774                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0
7775                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1
7776                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2
7777                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3
7778                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4
7779                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5
7780                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6
7781                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7
7782                         | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY
7783                         | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY
7784                         ))
7785     {
7786         counterMask |= ( NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT
7787                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0
7788                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1
7789                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2
7790                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3
7791                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4
7792                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5
7793                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6
7794                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7
7795                        | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY
7796                        | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY
7797                        );
7798     }
7799 
7800     FOR_EACH_INDEX_IN_MASK(64, i, ret->linkMask)
7801     {
7802         link = nvswitch_get_link(device, i);
7803         if (link == NULL)
7804         {
7805             continue;
7806         }
7807 
7808         if (NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLTLC, link->linkNumber))
7809         {
7810             nvswitch_ctrl_clear_throughput_counters_lr10(device, link, counterMask);
7811         }
7812         if (NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber))
7813         {
7814             status = nvswitch_ctrl_clear_dl_error_counters_lr10(device, link, counterMask);
7815             // Return early with failure on clearing through minion
7816             if (status != NVL_SUCCESS)
7817             {
7818                 NVSWITCH_PRINT(device, ERROR,
7819                     "%s: Failure on clearing link counter mask 0x%x on link %d\n",
7820                     __FUNCTION__, counterMask, link->linkNumber);
7821                 break;
7822             }
7823         }
7824     }
7825     FOR_EACH_INDEX_IN_MASK_END;
7826 
7827     return status;
7828 }
7829 
7830 NvlStatus
7831 nvswitch_ctrl_set_nvlink_error_threshold_lr10
7832 (
7833     nvswitch_device *device,
7834     NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS *ret
7835 )
7836 {
7837     return -NVL_ERR_NOT_SUPPORTED;
7838 }
7839 
7840 static NvlStatus
7841 nvswitch_ctrl_get_nvlink_error_threshold_lr10
7842 (
7843     nvswitch_device *device,
7844     NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS *ret
7845 )
7846 {
7847     return -NVL_ERR_NOT_SUPPORTED;
7848 }
7849 
7850 //
7851 // This function auto creates the lr10 HAL connectivity from the NVSWITCH_INIT_HAL
7852 // macro in haldef_nvswitch.h
7853 //
7854 // Note: All hal fns must be implemented for each chip.
7855 //       There is no automatic stubbing here.
7856 //
7857 void nvswitch_setup_hal_lr10(nvswitch_device *device)
7858 {
7859     device->chip_arch = NVSWITCH_GET_INFO_INDEX_ARCH_LR10;
7860 
7861     {
7862         device->chip_impl = NVSWITCH_GET_INFO_INDEX_IMPL_LR10;
7863     }
7864 
7865     NVSWITCH_INIT_HAL(device, lr10);
7866     NVSWITCH_INIT_HAL_LS10(device, lr10);
7867 }
7868