1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "common_nvswitch.h"
25 #include "bios_nvswitch.h"
26 #include "error_nvswitch.h"
27 #include "regkey_nvswitch.h"
28 #include "haldef_nvswitch.h"
29 #include "lr10/lr10.h"
30 #include "lr10/clock_lr10.h"
31 #include "lr10/minion_lr10.h"
32 #include "lr10/soe_lr10.h"
33 #include "lr10/pmgr_lr10.h"
34 #include "lr10/therm_lr10.h"
35 #include "lr10/inforom_lr10.h"
36 #include "lr10/smbpbi_lr10.h"
37 #include "flcn/flcnable_nvswitch.h"
38 #include "soe/soe_nvswitch.h"
39 #include "lr10/cci_lr10.h"
40 
41 #include "nvswitch/lr10/dev_nvs_top.h"
42 #include "nvswitch/lr10/dev_pri_ringmaster.h"
43 #include "nvswitch/lr10/dev_pri_ringstation_sys.h"
44 #include "nvswitch/lr10/dev_nvlsaw_ip.h"
45 #include "nvswitch/lr10/dev_nvlsaw_ip_addendum.h"
46 #include "nvswitch/lr10/dev_nvs_master.h"
47 #include "nvswitch/lr10/dev_nvltlc_ip.h"
48 #include "nvswitch/lr10/dev_nvldl_ip.h"
49 #include "nvswitch/lr10/dev_nvlipt_lnk_ip.h"
50 #include "nvswitch/lr10/dev_nvlctrl_ip.h"
51 #include "nvswitch/lr10/dev_npg_ip.h"
52 #include "nvswitch/lr10/dev_npgperf_ip.h"
53 #include "nvswitch/lr10/dev_nport_ip.h"
54 #include "nvswitch/lr10/dev_ingress_ip.h"
55 #include "nvswitch/lr10/dev_tstate_ip.h"
56 #include "nvswitch/lr10/dev_egress_ip.h"
57 #include "nvswitch/lr10/dev_route_ip.h"
58 #include "nvswitch/lr10/dev_therm.h"
59 #include "nvswitch/lr10/dev_soe_ip.h"
60 #include "nvswitch/lr10/dev_route_ip_addendum.h"
61 #include "nvswitch/lr10/dev_minion_ip.h"
62 #include "nvswitch/lr10/dev_minion_ip_addendum.h"
63 #include "nvswitch/lr10/dev_nport_ip_addendum.h"
64 #include "nvswitch/lr10/dev_nxbar_tile_ip.h"
65 #include "nvswitch/lr10/dev_nxbar_tc_global_ip.h"
66 #include "nvswitch/lr10/dev_sourcetrack_ip.h"
67 
68 #include "oob/smbpbi.h"
69 
70 #define DMA_ADDR_WIDTH_LR10     64
71 #define ROUTE_GANG_TABLE_SIZE (1 << DRF_SIZE(NV_ROUTE_REG_TABLE_ADDRESS_INDEX))
72 
//
// Take every valid NVLIPT_LNK link out of reset and wait for the hardware to
// confirm. Only used on the forced-config path: in normal operation MINION
// deasserts link resets during INITPHASE1 (see caller's note).
//
static void
_nvswitch_deassert_link_resets_lr10
(
    nvswitch_device *device
)
{
    NvU32 val, i;
    NVSWITCH_TIMEOUT timeout;
    NvBool           keepPolling;

    NVSWITCH_PRINT(device, WARN,
        "%s: NVSwitch Driver is taking the links out of reset. This should only happen during forced config.\n",
        __FUNCTION__);

    // Pass 1: request DEASSERT on every valid link before polling any of
    // them, so the per-link resets proceed in parallel.
    for (i = 0; i < NVSWITCH_LINK_COUNT(device); i++)
    {
        if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLIPT_LNK, i)) continue;

        val = NVSWITCH_LINK_RD32_LR10(device, i,
                NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET);
        val = FLD_SET_DRF_NUM(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET, _LINK_RESET,
                          NV_NVLIPT_LNK_RESET_RSTSEQ_LINK_RESET_LINK_RESET_DEASSERT, val);

        NVSWITCH_LINK_WR32_LR10(device, i,
                NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET, val);
    }

    // Pass 2: wait (up to 25 ms per link) for each link to report DEASSERTED.
    for (i = 0; i < NVSWITCH_LINK_COUNT(device); i++)
    {
        if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLIPT_LNK, i)) continue;

        // Poll for _RESET_STATUS == _DEASSERTED
        nvswitch_timeout_create(25*NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);

        do
        {
            // Sample the timeout before the read so the final read after
            // expiry is still checked once below.
            keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;

            val = NVSWITCH_LINK_RD32_LR10(device, i,
                    NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET);
            if (FLD_TEST_DRF(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET,
                        _LINK_RESET_STATUS, _DEASSERTED, val))
            {
                break;
            }

            nvswitch_os_sleep(1);
        }
        while (keepPolling);

        // Timeout is logged but not fatal: continue with the remaining links.
        if (!FLD_TEST_DRF(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET,
                    _LINK_RESET_STATUS, _DEASSERTED, val))
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Timeout waiting for link %d_LINK_RESET_STATUS == _DEASSERTED\n",
                __FUNCTION__, i);
                // Bug 2974064: Review this timeout handling (fall through)
        }
    }
}
133 
//
// Force-train a single link to ACTIVE without MINION assistance (forced
// config / sim bringup only): enable AUTO_HWCFG (SAFE) then AUTO_NVHS (HS)
// sequencing, request an LTSSM change to ACTIVE, and poll for the result.
// If the link reaches ACTIVE, BUFFER_READY is set on the TLC.
//
static void
_nvswitch_train_forced_config_link_lr10
(
    nvswitch_device *device,
    NvU32            linkId
)
{
    NvU32 data, i;
    nvlink_link *link;

    link = nvswitch_get_link(device, linkId);

    // Silently skip absent or invalid links.
    if ((link == NULL) ||
        !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
        (linkId >= NVSWITCH_NVLINK_MAX_LINKS))
    {
        return;
    }

    // Step 1: enable automatic hardware-config sequencing (takes link to SAFE).
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_TEST, _AUTO_HWCFG, _ENABLE, data);
    NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST, data);

    // Add some delay to let the sim/emu go to SAFE
    NVSWITCH_NSEC_DELAY(400 * NVSWITCH_INTERVAL_1USEC_IN_NS);

    // Step 2: enable automatic NVHS sequencing (takes link to high speed).
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_TEST, _AUTO_NVHS, _ENABLE, data);
    NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST, data);

    // Add some delay to let the sim/emu go to HS
    NVSWITCH_NSEC_DELAY(400 * NVSWITCH_INTERVAL_1USEC_IN_NS);

    // Step 3: request an LTSSM state change to ACTIVE.
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _NEWSTATE,      _ACTIVE, data);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _OLDSTATE_MASK, _DONTCARE, data);
    data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _ACTION,        _LTSSM_CHANGE, data);
    NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE, data);

    i = 0;

    // Poll until LINK_CHANGE[1:0] != 2b01.
    // Up to 5 polls, 5 us apart; exits early on completion or FAULT.
    while (i < 5)
    {
        data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE);

        if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_CHANGE, _STATUS, _BUSY, data))
        {
            NVSWITCH_PRINT(device, INFO,
                "%s : Waiting for link %d to go to ACTIVE\n",
                __FUNCTION__, linkId);
        }
        else if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_CHANGE, _STATUS, _FAULT, data))
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s : Fault while changing LINK to ACTIVE. Link = %d\n",
                __FUNCTION__, linkId);
            break;
        }
        else
        {
            break;
        }

        NVSWITCH_NSEC_DELAY(5 * NVSWITCH_INTERVAL_1USEC_IN_NS);
        i++;
    }

    // Step 4: confirm the final link state and set BUFFER_READY on success.
    data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_STATE);

    if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_STATE, _STATE, _ACTIVE, data))
    {
        NVSWITCH_PRINT(device, INFO,
            "%s : Link %d is in ACTIVE state, setting BUFFER_READY\n",
            __FUNCTION__, linkId);

        // Set buffer ready only for nvlink TLC and not NPORT
        nvswitch_init_buffer_ready(device, link, NV_FALSE);
    }
    else
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s : Timeout while waiting for link %d to go to ACTIVE\n",
            __FUNCTION__, linkId);
        NVSWITCH_PRINT(device, ERROR,
            "%s : Link %d is in 0x%x state\n",
            __FUNCTION__, linkId,DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, data));
    }

}
224 
225 void
_nvswitch_setup_chiplib_forced_config_lr10(nvswitch_device * device)226 _nvswitch_setup_chiplib_forced_config_lr10
227 (
228     nvswitch_device *device
229 )
230 {
231     NvU64 links = ((NvU64)device->regkeys.chiplib_forced_config_link_mask) +
232                   ((NvU64)device->regkeys.chiplib_forced_config_link_mask2 << 32);
233     NvU32 i;
234 
235     if (links == 0)
236     {
237         return;
238     }
239 
240     //
241     // First, take the links out of reset
242     //
243     // NOTE: On LR10, MINION will take the links out of reset during INITPHASE1
244     // On platforms where MINION is not present and/or we want to run with forced
245     // config, the driver must de-assert the link reset
246     //
247     _nvswitch_deassert_link_resets_lr10(device);
248 
249     // Next, train the links to ACTIVE/NVHS
250     FOR_EACH_INDEX_IN_MASK(64, i, links)
251     {
252         if (device->link[i].valid)
253         {
254             _nvswitch_train_forced_config_link_lr10(device, i);
255         }
256     }
257     FOR_EACH_INDEX_IN_MASK_END;
258 }
259 
260 /*!
261  * @brief Parse packed little endian data and unpack into padded structure
262  *
263  * @param[in]   format          Data format
264  * @param[in]   packedData      Packed little endian data
265  * @param[out]  unpackedData    Unpacked padded structure
266  * @param[out]  unpackedSize    Unpacked data size
267  * @param[out]  fieldsCount     Number of fields
268  *
269  * @return 'NV_OK'
270  */
271 NV_STATUS
_nvswitch_devinit_unpack_structure(const char * format,const NvU8 * packedData,NvU32 * unpackedData,NvU32 * unpackedSize,NvU32 * fieldsCount)272 _nvswitch_devinit_unpack_structure
273 (
274     const char *format,
275     const NvU8 *packedData,
276     NvU32      *unpackedData,
277     NvU32      *unpackedSize,
278     NvU32      *fieldsCount
279 )
280 {
281     NvU32 unpkdSize = 0;
282     NvU32 fields = 0;
283     NvU32 count;
284     NvU32 data;
285     char fmt;
286 
287     while ((fmt = *format++))
288     {
289         count = 0;
290         while ((fmt >= '0') && (fmt <= '9'))
291         {
292             count *= 10;
293             count += fmt - '0';
294             fmt = *format++;
295         }
296         if (count == 0)
297             count = 1;
298 
299         while (count--)
300         {
301             switch (fmt)
302             {
303                 case 'b':
304                     data = *packedData++;
305                     unpkdSize += 1;
306                     break;
307 
308                 case 's':    // signed byte
309                     data = *packedData++;
310                     if (data & 0x80)
311                         data |= ~0xff;
312                     unpkdSize += 1;
313                     break;
314 
315                 case 'w':
316                     data  = *packedData++;
317                     data |= *packedData++ << 8;
318                     unpkdSize += 2;
319                     break;
320 
321                 case 'd':
322                     data  = *packedData++;
323                     data |= *packedData++ << 8;
324                     data |= *packedData++ << 16;
325                     data |= *packedData++ << 24;
326                     unpkdSize += 4;
327                     break;
328 
329                 default:
330                     return NV_ERR_GENERIC;
331             }
332             *unpackedData++ = data;
333             fields++;
334         }
335     }
336 
337     if (unpackedSize != NULL)
338         *unpackedSize = unpkdSize;
339 
340     if (fieldsCount != NULL)
341         *fieldsCount = fields;
342 
343     return NV_OK;
344 }
345 
346 /*!
347  * @brief Calculate packed and unpacked data size based on given data format
348  *
349  * @param[in]   format          Data format
350  * @param[out]  packedSize      Packed data size
351  * @param[out]  unpackedSize    Unpacked data size
352  *
353  */
354 void
_nvswitch_devinit_calculate_sizes(const char * format,NvU32 * packedSize,NvU32 * unpackedSize)355 _nvswitch_devinit_calculate_sizes
356 (
357     const char *format,
358     NvU32      *packedSize,
359     NvU32      *unpackedSize
360 )
361 {
362     NvU32 unpkdSize = 0;
363     NvU32 pkdSize = 0;
364     NvU32 count;
365     char fmt;
366 
367     while ((fmt = *format++))
368     {
369         count = 0;
370         while ((fmt >= '0') && (fmt <= '9'))
371         {
372             count *= 10;
373             count += fmt - '0';
374             fmt = *format++;
375         }
376         if (count == 0)
377             count = 1;
378 
379         switch (fmt)
380         {
381             case 'b':
382                 pkdSize += count * 1;
383                 unpkdSize += count * sizeof(bios_U008);
384                 break;
385 
386             case 's':    // signed byte
387                 pkdSize += count * 1;
388                 unpkdSize += count * sizeof(bios_S008);
389                 break;
390 
391             case 'w':
392                 pkdSize += count * 2;
393                 unpkdSize += count * sizeof(bios_U016);
394                 break;
395 
396             case 'd':
397                 pkdSize += count * 4;
398                 unpkdSize += count * sizeof(bios_U032);
399                 break;
400         }
401     }
402 
403     if (packedSize != NULL)
404         *packedSize = pkdSize;
405 
406     if (unpackedSize != NULL)
407         *unpackedSize = unpkdSize;
408 }
409 
/*!
 * @brief Read a packed structure from the VBIOS image and unpack it
 *
 * @param[in]   device          nvswitch device
 * @param[out]  structure       Unpacked output structure
 * @param[in]   offset          Byte offset of the packed data in the image
 * @param[out]  ppacked_size    Packed data size (optional)
 * @param[in]   format          Data format
 *
 */
418 
419 NV_STATUS
_nvswitch_vbios_read_structure(nvswitch_device * device,void * structure,NvU32 offset,NvU32 * ppacked_size,const char * format)420 _nvswitch_vbios_read_structure
421 (
422     nvswitch_device *device,
423     void            *structure,
424     NvU32           offset,
425     NvU32           *ppacked_size,
426     const char      *format
427 )
428 {
429     NvU32  packed_size;
430     NvU8  *packed_data;
431     NvU32  unpacked_bytes;
432 
433     // calculate the size of the data as indicated by its packed format.
434     _nvswitch_devinit_calculate_sizes(format, &packed_size, &unpacked_bytes);
435 
436     if (ppacked_size)
437         *ppacked_size = packed_size;
438 
439     //
440     // is 'offset' too big?
441     // happens when we read bad ptrs from fixed addrs in image frequently
442     //
443     if ((offset + packed_size) > device->biosImage.size)
444     {
445         NVSWITCH_PRINT(device, ERROR, "%s: Bad offset in bios read: 0x%x, max is 0x%x, fmt is '%s'\n",
446                        __FUNCTION__, offset, device->biosImage.size, format);
447         return NV_ERR_GENERIC;
448     }
449 
450     packed_data = &device->biosImage.pImage[offset];
451     return _nvswitch_devinit_unpack_structure(format, packed_data, structure,
452                                               &unpacked_bytes, NULL);
453 }
454 
455 
456 NvlStatus
nvswitch_vbios_read_structure_lr10(nvswitch_device * device,void * structure,NvU32 offset,NvU32 * ppacked_size,const char * format)457 nvswitch_vbios_read_structure_lr10
458 (
459     nvswitch_device *device,
460     void            *structure,
461     NvU32           offset,
462     NvU32           *ppacked_size,
463     const char      *format
464 )
465 {
466     if (NV_OK == _nvswitch_vbios_read_structure(device, structure, offset, ppacked_size, format))
467     {
468        return NVL_SUCCESS;
469     }
470     else
471     {
472        return -NVL_ERR_GENERIC;
473     }
474 }
475 
476 NvU8
_nvswitch_vbios_read8(nvswitch_device * device,NvU32 offset)477 _nvswitch_vbios_read8
478 (
479     nvswitch_device *device,
480     NvU32           offset
481 )
482 {
483     bios_U008 data;     // BiosReadStructure expects 'bios' types
484 
485     _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "b");
486 
487     return (NvU8) data;
488 }
489 
490 NvU16
_nvswitch_vbios_read16(nvswitch_device * device,NvU32 offset)491 _nvswitch_vbios_read16
492 (
493     nvswitch_device *device,
494     NvU32           offset
495 )
496 {
497     bios_U016 data;     // BiosReadStructure expects 'bios' types
498 
499     _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "w");
500 
501     return (NvU16) data;
502 }
503 
504 
505 NvU32
_nvswitch_vbios_read32(nvswitch_device * device,NvU32 offset)506 _nvswitch_vbios_read32
507 (
508     nvswitch_device *device,
509     NvU32           offset
510 )
511 {
512     bios_U032 data;     // BiosReadStructure expects 'bios' types
513 
514     _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "d");
515 
516     return (NvU32) data;
517 }
518 
//
// Walk the BIT (BIOS Information Table) token list located at
// bios_config->bit_address and record the pointers the driver needs.
// Currently only the NVINIT_PTRS token is consumed: it supplies the NVLink
// config data table offset, stored into bios_config.
//
NV_STATUS
_nvswitch_perform_BIT_offset_update
(
    nvswitch_device *device,
    NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
)
{
    BIT_HEADER_V1_00         bitHeader;
    BIT_TOKEN_V1_00          bitToken;
    NV_STATUS                rmStatus;
    NvU32                    dataPointerOffset;
    NvU32 i;

    // Read the BIT header found earlier by nvswitch_verify_header().
    rmStatus = _nvswitch_vbios_read_structure(device,
                                              (NvU8*) &bitHeader,
                                              bios_config->bit_address,
                                              (NvU32 *) 0,
                                              BIT_HEADER_V1_00_FMT);

    if(rmStatus != NV_OK)
    {
        NVSWITCH_PRINT(device, ERROR,
                       "%s: Failed to read BIT table structure!.\n",
                       __FUNCTION__);
        return rmStatus;
    }

    // Tokens are laid out back-to-back immediately after the header,
    // each TokenSize bytes long.
    for(i=0; i < bitHeader.TokenEntries; i++)
    {
        NvU32 BitTokenLocation = bios_config->bit_address + bitHeader.HeaderSize + (i * bitHeader.TokenSize);
        rmStatus = _nvswitch_vbios_read_structure(device,
                                                 (NvU8*) &bitToken,
                                                 BitTokenLocation,
                                                 (NvU32 *) 0,
                                                 BIT_TOKEN_V1_00_FMT);
        if(rmStatus != NV_OK)
        {
            NVSWITCH_PRINT(device, WARN,
                "%s: Failed to read BIT token %d!\n",
                __FUNCTION__, i);
            return NV_ERR_GENERIC;
        }

        // Token data pointers are relative to the PCI image base.
        dataPointerOffset = (bios_config->pci_image_address + bitToken.DataPtr);
        switch(bitToken.TokenId)
        {
            case BIT_TOKEN_NVINIT_PTRS:
            {
                BIT_DATA_NVINIT_PTRS_V1 nvInitTablePtrs;
                rmStatus = _nvswitch_vbios_read_structure(device,
                                                          (NvU8*) &nvInitTablePtrs,
                                                          dataPointerOffset,
                                                          (NvU32 *) 0,
                                                          BIT_DATA_NVINIT_PTRS_V1_30_FMT);
                if (rmStatus != NV_OK)
                {
                    NVSWITCH_PRINT(device, WARN,
                                   "%s: Failed to read internal data structure\n",
                                   __FUNCTION__);
                    return NV_ERR_GENERIC;
                }
                // Update the retrieved info with device info
                bios_config->nvlink_config_table_address = (nvInitTablePtrs.NvlinkConfigDataPtr + bios_config->pci_image_address);
            }
            break;
        }
    }

    return NV_OK;
}
589 
590 NV_STATUS
_nvswitch_validate_BIT_header(nvswitch_device * device,NvU32 bit_address)591 _nvswitch_validate_BIT_header
592 (
593     nvswitch_device *device,
594     NvU32            bit_address
595 )
596 {
597     NvU32    headerSize = 0;
598     NvU32    chkSum = 0;
599     NvU32    i;
600 
601     //
602     // For now let's assume the Header Size is always at the same place.
603     // We can create something more complex if needed later.
604     //
605     headerSize = (NvU32)_nvswitch_vbios_read8(device, bit_address + BIT_HEADER_SIZE_OFFSET);
606 
607     // Now perform checksum
608     for (i = 0; i < headerSize; i++)
609         chkSum += (NvU32)_nvswitch_vbios_read8(device, bit_address + i);
610 
611     //Byte checksum removes upper bytes
612     chkSum = chkSum & 0xFF;
613 
614     if (chkSum)
615         return NV_ERR_GENERIC;
616 
617     return NV_OK;
618 }
619 
620 
//
// Scan the ROM image, starting at the PCI image base, for the BIT header
// (16-bit ID followed by a 32-bit signature). On a match, record its offset
// in bios_config->bit_address and checksum-validate the header.
//
NV_STATUS
nvswitch_verify_header
(
    nvswitch_device *device,
    NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
)
{
    NvU32       i;
    NV_STATUS   status = NV_ERR_GENERIC;

    // The PCI image base must have been located first (see
    // _nvswitch_vbios_identify_pci_image_loc).
    if ((bios_config == NULL) || (!bios_config->pci_image_address))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: PCI Image offset is not identified\n",
            __FUNCTION__);
        return status;
    }

    // attempt to find the init info in the BIOS
    // NOTE(review): the unsigned bound (size - 3) would wrap for an image
    // smaller than 3 bytes -- presumably callers guarantee a sane image
    // size here; confirm.
    for (i = bios_config->pci_image_address; i < device->biosImage.size - 3; i++)
    {
        NvU16 bitheaderID = _nvswitch_vbios_read16(device, i);
        if (bitheaderID == BIT_HEADER_ID)
        {
            NvU32 signature = _nvswitch_vbios_read32(device, i + 2);
            if (signature == BIT_HEADER_SIGNATURE)
            {
                bios_config->bit_address = i;

                // Checksum BIT to prove accuracy
                // NOTE(review): on checksum failure the image is discarded
                // but bit_address remains set, so NV_OK is still returned
                // below -- verify this is the intended behavior.
                if (NV_OK != _nvswitch_validate_BIT_header(device, bios_config->bit_address))
                {
                    device->biosImage.pImage = 0;
                    device->biosImage.size = 0;
                }
            }
        }
        // only if we find the bit address do we break
        if (bios_config->bit_address)
            break;
    }
    if (bios_config->bit_address)
    {
        status = NV_OK;
    }

    return status;
}
669 
670 NV_STATUS
_nvswitch_vbios_update_bit_Offset(nvswitch_device * device,NVSWITCH_BIOS_NVLINK_CONFIG * bios_config)671 _nvswitch_vbios_update_bit_Offset
672 (
673     nvswitch_device *device,
674     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
675 )
676 {
677     NV_STATUS   status = NV_OK;
678 
679     if (bios_config->bit_address)
680     {
681         goto vbios_update_bit_Offset_done;
682     }
683 
684     status = nvswitch_verify_header(device, bios_config);
685     if (status != NV_OK)
686     {
687         NVSWITCH_PRINT(device, ERROR, "%s: *** BIT header is not found in vbios!\n",
688             __FUNCTION__);
689         goto vbios_update_bit_Offset_done;
690     }
691 
692     if (bios_config->bit_address)
693     {
694 
695         status = _nvswitch_perform_BIT_offset_update(device, bios_config);
696         if (status != NV_OK)
697             goto vbios_update_bit_Offset_done;
698     }
699 
700 vbios_update_bit_Offset_done:
701     return status;
702 }
703 
704 
705 NV_STATUS
_nvswitch_vbios_identify_pci_image_loc(nvswitch_device * device,NVSWITCH_BIOS_NVLINK_CONFIG * bios_config)706 _nvswitch_vbios_identify_pci_image_loc
707 (
708     nvswitch_device         *device,
709     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
710 )
711 {
712     NV_STATUS   status = NV_OK;
713     NvU32       i;
714 
715     if (bios_config->pci_image_address)
716     {
717         goto vbios_identify_pci_image_loc_done;
718     }
719 
720     // Match the PCI_EXP_ROM_SIGNATURE and followed by the PCI Data structure
721     // with PCIR and matching vendor ID
722     NVSWITCH_PRINT(device, SETUP,
723         "%s: Verifying and extracting PCI Data.\n",
724         __FUNCTION__);
725 
726     // attempt to find the init info in the BIOS
727     for (i = 0; i < (device->biosImage.size - PCI_ROM_HEADER_PCI_DATA_SIZE); i++)
728     {
729         NvU16 pci_rom_sigature = _nvswitch_vbios_read16(device, i);
730 
731         if (pci_rom_sigature == PCI_EXP_ROM_SIGNATURE)
732         {
733             NvU32 pcir_data_dffSet  = _nvswitch_vbios_read16(device, i + PCI_ROM_HEADER_SIZE);  // 0x16 -> 0x18 i.e, including the ROM Signature bytes
734 
735             if (((i + pcir_data_dffSet) + PCI_DATA_STRUCT_SIZE) < device->biosImage.size)
736             {
737                 NvU32 pcirSigature = _nvswitch_vbios_read32(device, (i + pcir_data_dffSet));
738 
739                 if (pcirSigature == PCI_DATA_STRUCT_SIGNATURE)
740                 {
741                     PCI_DATA_STRUCT pciData;
742                     status = _nvswitch_vbios_read_structure(device,
743                                                            (NvU8*) &pciData,
744                                                             i + pcir_data_dffSet,
745                                                             (NvU32 *) 0,
746                                                             PCI_DATA_STRUCT_FMT);
747                     if (status != NV_OK)
748                     {
749                         NVSWITCH_PRINT(device, WARN,
750                                        "%s: Failed to PCI Data for validation\n",
751                                        __FUNCTION__);
752                         goto vbios_identify_pci_image_loc_done;
753                     }
754 
755                     // Validate the vendor details as well
756                     if (pciData.vendorID == PCI_VENDOR_ID_NVIDIA)
757                     {
758                         bios_config->pci_image_address = i;
759                         break;
760                     }
761                 }
762             }
763         }
764     }
765 
766 vbios_identify_pci_image_loc_done:
767     return status;
768 }
769 
_nvswitch_get_nvlink_config_address(nvswitch_device * device,NVSWITCH_BIOS_NVLINK_CONFIG * bios_config)770 NvU32 _nvswitch_get_nvlink_config_address
771 (
772     nvswitch_device         *device,
773     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
774 )
775 {
776     return bios_config->nvlink_config_table_address;
777 }
778 
779 NV_STATUS
_nvswitch_read_vbios_link_base_entry(nvswitch_device * device,NvU32 tblPtr,NVLINK_CONFIG_DATA_BASEENTRY * link_base_entry)780 _nvswitch_read_vbios_link_base_entry
781 (
782     nvswitch_device *device,
783     NvU32            tblPtr,
784     NVLINK_CONFIG_DATA_BASEENTRY  *link_base_entry
785 )
786 {
787     NV_STATUS status = NV_ERR_INVALID_PARAMETER;
788     NVLINK_VBIOS_CONFIG_DATA_BASEENTRY vbios_link_base_entry;
789 
790     status = _nvswitch_vbios_read_structure(device, &vbios_link_base_entry, tblPtr, (NvU32 *)0, NVLINK_CONFIG_DATA_BASEENTRY_FMT);
791     if (status != NV_OK)
792     {
793         NVSWITCH_PRINT(device, ERROR,
794             "%s: Error on reading nvlink base entry\n",
795             __FUNCTION__);
796         return status;
797     }
798 
799     link_base_entry->positionId = vbios_link_base_entry.positionId;
800 
801     return status;
802 }
803 
//
// Read up to expected_link_entriesCount NVLink config link entries (v2.0
// format) from the VBIOS starting at tblPtr. On success all entries are
// copied into link_entries and *identified_link_entriesCount reports how
// many were read; on a read failure, parsing stops and the error is
// returned (entries already copied remain valid).
//
NvlStatus
nvswitch_read_vbios_link_entries_lr10
(
    nvswitch_device              *device,
    NvU32                         tblPtr,
    NvU32                         expected_link_entriesCount,
    NVLINK_CONFIG_DATA_LINKENTRY *link_entries,
    NvU32                        *identified_link_entriesCount
)
{
    // NOTE(review): with expected_link_entriesCount == 0 this initial value
    // is returned unchanged -- confirm callers never pass 0.
    NV_STATUS status = NV_ERR_INVALID_PARAMETER;
    NvU32 i;
    NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_20 vbios_link_entry;
    *identified_link_entriesCount = 0;

    for (i = 0; i < expected_link_entriesCount; i++)
    {
        status = _nvswitch_vbios_read_structure(device,
                                                &vbios_link_entry,
                                                tblPtr, (NvU32 *)0,
                                                NVLINK_CONFIG_DATA_LINKENTRY_FMT_20);
        if (status != NV_OK)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Error on reading nvlink entry\n",
                __FUNCTION__);
            return status;
        }
        link_entries[i].nvLinkparam0 = (NvU8)vbios_link_entry.nvLinkparam0;
        link_entries[i].nvLinkparam1 = (NvU8)vbios_link_entry.nvLinkparam1;
        link_entries[i].nvLinkparam2 = (NvU8)vbios_link_entry.nvLinkparam2;
        link_entries[i].nvLinkparam3 = (NvU8)vbios_link_entry.nvLinkparam3;
        link_entries[i].nvLinkparam4 = (NvU8)vbios_link_entry.nvLinkparam4;
        link_entries[i].nvLinkparam5 = (NvU8)vbios_link_entry.nvLinkparam5;
        link_entries[i].nvLinkparam6 = (NvU8)vbios_link_entry.nvLinkparam6;
        // Advance tblPtr by the packed entry size in bytes.
        // NOTE(review): this relies on sizeof(...)/sizeof(NvU32) -- i.e. the
        // number of NvU32 fields in the unpacked struct -- equaling the
        // packed byte size of FMT_20 (one byte per field). Confirm if either
        // the struct or the format string changes.
        tblPtr += (sizeof(NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_20)/sizeof(NvU32));

        NVSWITCH_PRINT(device, NOISY,
            "<<<---- NvLink ID 0x%x ---->>>\n", i);
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 0 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam0, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam0));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 1 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam1, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam1));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 2 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam2, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam2));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 3 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam3, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam3));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 4 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam4, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam4));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 5 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam5, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam5));
        NVSWITCH_PRINT(device, NOISY,
            "NVLink Params 6 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam6, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam6));
        NVSWITCH_PRINT(device, NOISY,
            "<<<---- NvLink ID 0x%x ---->>>\n\n", i);
    }
    *identified_link_entriesCount = i;
    return status;
}
863 
/*
 * @Brief : Locate and parse the NVLink Config Data table in the VBIOS image.
 *
 * @Description : Reads the table header (version 2.0/3.0 layouts share the
 *                ver_20 header format), clamps the base/link entry counts to
 *                driver limits, then walks each base entry and its link
 *                entries into bios_config.
 *
 * @param[in]  device       nvswitch device handle
 * @param[out] bios_config  receives base entries, link entries and counts
 *
 * @returns NV_OK on success; NV_ERR_GENERIC if the table is absent, the
 *          header is an unsupported version/size, or the header read fails.
 */
NV_STATUS
_nvswitch_vbios_fetch_nvlink_entries
(
    nvswitch_device         *device,
    NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
)
{
    NvU32                       tblPtr;
    NvU8                        version;
    NvU8                        size;
    // Default to failure: any path that does not successfully read a
    // recognized header falls through to the error exit below.
    NV_STATUS                   status = NV_ERR_GENERIC;
    NVLINK_CONFIG_DATA_HEADER   header;
    NvU32                       base_entry_index;
    NvU32                       expected_base_entry_count;

    tblPtr = _nvswitch_get_nvlink_config_address(device, bios_config);
    if (!tblPtr)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: No NvLink Config table set\n",
            __FUNCTION__);
        goto vbios_fetch_nvlink_entries_done;
    }

    // Read the table version number
    version = _nvswitch_vbios_read8(device, tblPtr);
    switch (version)
    {
        case NVLINK_CONFIG_DATA_HEADER_VER_20:
        case NVLINK_CONFIG_DATA_HEADER_VER_30:
            size = _nvswitch_vbios_read8(device, tblPtr + 1);
            if (size == NVLINK_CONFIG_DATA_HEADER_20_SIZE)
            {
                // Grab Nvlink Config Data Header
                status = _nvswitch_vbios_read_structure(device, &header.ver_20, tblPtr, (NvU32 *) 0, NVLINK_CONFIG_DATA_HEADER_20_FMT);

                if (status != NV_OK)
                {
                    NVSWITCH_PRINT(device, ERROR,
                        "%s: Error on reading the nvlink config header\n",
                        __FUNCTION__);
                }
            }
            // An unexpected header size leaves status == NV_ERR_GENERIC,
            // which is treated as a failure below.
            break;
        default:
            NVSWITCH_PRINT(device, ERROR,
                "%s: Invalid version 0x%x\n",
                __FUNCTION__, version);
    }
    if (status != NV_OK)
    {
        goto vbios_fetch_nvlink_entries_done;
    }

    NVSWITCH_PRINT(device, NOISY,
        "<<<---- NvLink Header ---->>>\n\n");
    NVSWITCH_PRINT(device, NOISY,
        "Version \t\t 0x%x\n", header.ver_20.Version);
    NVSWITCH_PRINT(device, NOISY,
        "Header Size \t0x%x\n", header.ver_20.HeaderSize);
    NVSWITCH_PRINT(device, NOISY,
        "Base Entry Size \t0x%x\n", header.ver_20.BaseEntrySize);
    NVSWITCH_PRINT(device, NOISY,
        "Base Entry count \t0x%x\n", header.ver_20.BaseEntryCount);
    NVSWITCH_PRINT(device, NOISY,
        "Link Entry Size \t0x%x\n", header.ver_20.LinkEntrySize);
    NVSWITCH_PRINT(device, NOISY,
        "Link Entry Count \t0x%x\n", header.ver_20.LinkEntryCount);
    NVSWITCH_PRINT(device, NOISY,
        "Reserved \t0x%x\n", header.ver_20.Reserved);
    NVSWITCH_PRINT(device, NOISY,
        "<<<---- NvLink Header ---->>>\n");
    // Remember the table version; it selects the per-link entry stride below.
    if (header.ver_20.Version == NVLINK_CONFIG_DATA_HEADER_VER_20)
    {
         device->bIsNvlinkVbiosTableVersion2 = NV_TRUE;
    }
    // Clamp the base entry count to the driver's storage capacity.
    expected_base_entry_count = header.ver_20.BaseEntryCount;
    if (expected_base_entry_count > NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY)
    {
        NVSWITCH_PRINT(device, WARN,
            "%s: Greater than expected base entry count 0x%x - Restricting to count 0x%x\n",
            __FUNCTION__, expected_base_entry_count, NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY);
        expected_base_entry_count = NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY;
    }

    tblPtr += header.ver_20.HeaderSize;
    for (base_entry_index = 0; base_entry_index < expected_base_entry_count; base_entry_index++)
    {
        // Clamp link entries per base entry to the physical link count.
        NvU32 expected_link_entriesCount = header.ver_20.LinkEntryCount;
        if (expected_link_entriesCount > NVSWITCH_LINK_COUNT(device))
        {
            NVSWITCH_PRINT(device, WARN,
                "%s: Greater than expected link count 0x%x - Restricting to count 0x%x\n",
                __FUNCTION__, expected_link_entriesCount, NVSWITCH_LINK_COUNT(device));
            expected_link_entriesCount = NVSWITCH_LINK_COUNT(device);
        }

        // Grab Nvlink Config Data Base Entry
        // NOTE(review): the return status of this read is ignored; a failed
        // base-entry read silently proceeds with stale data - confirm intent.
        _nvswitch_read_vbios_link_base_entry(device, tblPtr, &bios_config->link_vbios_base_entry[base_entry_index]);
        tblPtr += header.ver_20.BaseEntrySize;
        device->hal.nvswitch_read_vbios_link_entries(device,
                                          tblPtr,
                                          expected_link_entriesCount,
                                          bios_config->link_vbios_entry[base_entry_index],
                                          &bios_config->identified_Link_entries[base_entry_index]);

        // Advance past the link entries using the stride that matches the
        // table version (ver 2.0 vs ver 3.0 entry layouts differ in size).
        if (device->bIsNvlinkVbiosTableVersion2)
        {
            tblPtr += (expected_link_entriesCount * (sizeof(NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_20)/sizeof(NvU32)));
        }
        else
        {
            tblPtr += (expected_link_entriesCount * (sizeof(NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_30)/sizeof(NvU32)));
        }
    }
vbios_fetch_nvlink_entries_done:
    return status;
}
982 
983 NV_STATUS
_nvswitch_vbios_assign_base_entry(nvswitch_device * device,NVSWITCH_BIOS_NVLINK_CONFIG * bios_config)984 _nvswitch_vbios_assign_base_entry
985 (
986     nvswitch_device         *device,
987     NVSWITCH_BIOS_NVLINK_CONFIG    *bios_config
988 )
989 {
990     NvU32 physical_id;
991     NvU32 entry_index;
992 
993     physical_id = nvswitch_read_physical_id(device);
994 
995     for (entry_index = 0; entry_index < NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY; entry_index++)
996     {
997         if (physical_id == bios_config->link_vbios_base_entry[entry_index].positionId)
998         {
999             bios_config->link_base_entry_assigned = entry_index;
1000             return NV_OK;
1001         }
1002     }
1003 
1004     // TODO: Bug 3507948
1005     NVSWITCH_PRINT(device, ERROR,
1006             "%s: Error on assigning base entry. Setting base entry index = 0\n",
1007             __FUNCTION__);
1008     bios_config->link_base_entry_assigned = 0;
1009 
1010     return NV_OK;
1011 }
1012 
1013 NV_STATUS
_nvswitch_setup_link_vbios_overrides(nvswitch_device * device,NVSWITCH_BIOS_NVLINK_CONFIG * bios_config)1014 _nvswitch_setup_link_vbios_overrides
1015 (
1016     nvswitch_device *device,
1017     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config
1018 )
1019 {
1020     NV_STATUS    status         = NV_OK;
1021 
1022     if (bios_config == NULL)
1023     {
1024         NVSWITCH_PRINT(device, ERROR,
1025                 "%s: BIOS config override not supported\n",
1026                 __FUNCTION__);
1027          return -NVL_ERR_NOT_SUPPORTED;
1028     }
1029 
1030     bios_config->vbios_disabled_link_mask = 0;
1031 
1032     bios_config->bit_address                 = 0;
1033     bios_config->pci_image_address           = 0;
1034     bios_config->nvlink_config_table_address = 0;
1035 
1036     if ((device->biosImage.size == 0) || (device->biosImage.pImage == NULL))
1037     {
1038         NVSWITCH_PRINT(device, ERROR,
1039                 "%s: VBIOS not exist size:0x%x\n",
1040                 __FUNCTION__, device->biosImage.size);
1041          return -NVL_ERR_NOT_SUPPORTED;
1042     }
1043 
1044     //
1045     // Locate the PCI ROM Image
1046     //
1047     if (_nvswitch_vbios_identify_pci_image_loc(device, bios_config)  != NV_OK)
1048     {
1049         NVSWITCH_PRINT(device, ERROR,
1050             "%s: Error on identifying pci image loc\n",
1051             __FUNCTION__);
1052         status = NV_ERR_GENERIC;
1053         goto setup_link_vbios_overrides_done;
1054     }
1055 
1056     //
1057     // Locate and fetch BIT offset
1058     //
1059     if (_nvswitch_vbios_update_bit_Offset(device, bios_config) != NV_OK)
1060     {
1061         NVSWITCH_PRINT(device, ERROR,
1062             "%s: Error on identifying pci image loc\n",
1063             __FUNCTION__);
1064         status = NV_ERR_GENERIC;
1065         goto setup_link_vbios_overrides_done;
1066     }
1067 
1068     //
1069     // Fetch NvLink Entries
1070     //
1071     if (_nvswitch_vbios_fetch_nvlink_entries(device, bios_config) != NV_OK)
1072     {
1073         NVSWITCH_PRINT(device, ERROR,
1074             "%s: Error on fetching nvlink entries\n",
1075             __FUNCTION__);
1076         status = NV_ERR_GENERIC;
1077         goto setup_link_vbios_overrides_done;
1078     }
1079 
1080     //
1081     // Assign Base Entry for this device
1082     //
1083     if (_nvswitch_vbios_assign_base_entry(device, bios_config) != NV_OK)
1084     {
1085         NVSWITCH_PRINT(device, ERROR,
1086             "%s: Error on assigning base entry\n",
1087             __FUNCTION__);
1088         status = NV_ERR_GENERIC;
1089         goto setup_link_vbios_overrides_done;
1090     }
1091 
1092 setup_link_vbios_overrides_done:
1093     if (status != NV_OK)
1094     {
1095         bios_config->bit_address                = 0;
1096         bios_config->pci_image_address          = 0;
1097         bios_config->nvlink_config_table_address =0;
1098     }
1099     return status;
1100 }
1101 
1102 /*
1103  * @Brief : Setting up system registers after device initialization
1104  *
1105  * @Description :
1106  *
1107  * @param[in] device        a reference to the device to initialize
1108  */
1109 NvlStatus
nvswitch_setup_system_registers_lr10(nvswitch_device * device)1110 nvswitch_setup_system_registers_lr10
1111 (
1112     nvswitch_device *device
1113 )
1114 {
1115     nvlink_link *link;
1116     NvU8 i;
1117     NvU64 enabledLinkMask;
1118 
1119     enabledLinkMask = nvswitch_get_enabled_link_mask(device);
1120 
1121     FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask)
1122     {
1123         NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device));
1124 
1125         link = nvswitch_get_link(device, i);
1126 
1127         if ((link == NULL) ||
1128             !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
1129             (i >= NVSWITCH_NVLINK_MAX_LINKS))
1130         {
1131             continue;
1132         }
1133 
1134         nvswitch_setup_link_system_registers(device, link);
1135         nvswitch_load_link_disable_settings(device, link);
1136     }
1137     FOR_EACH_INDEX_IN_MASK_END;
1138 
1139     return NVL_SUCCESS;
1140 }
1141 
1142 NvlStatus
nvswitch_deassert_link_reset_lr10(nvswitch_device * device,nvlink_link * link)1143 nvswitch_deassert_link_reset_lr10
1144 (
1145     nvswitch_device *device,
1146     nvlink_link     *link
1147 )
1148 {
1149     NvU64 mode;
1150     NvlStatus status = NVL_SUCCESS;
1151 
1152     status = device->hal.nvswitch_corelib_get_dl_link_mode(link, &mode);
1153 
1154     if (status != NVL_SUCCESS)
1155     {
1156         NVSWITCH_PRINT(device, ERROR,
1157                 "%s:DL link mode failed on link %d\n",
1158                 __FUNCTION__, link->linkNumber);
1159         return status;
1160     }
1161 
1162     // Check if the link is RESET
1163     if (mode != NVLINK_LINKSTATE_RESET)
1164     {
1165         return NVL_SUCCESS;
1166     }
1167 
1168     // Send INITPHASE1 to bring link out of reset
1169     status = link->link_handlers->set_dl_link_mode(link,
1170                                         NVLINK_LINKSTATE_INITPHASE1,
1171                                         NVLINK_STATE_CHANGE_ASYNC);
1172 
1173     if (status != NVL_SUCCESS)
1174     {
1175         NVSWITCH_PRINT(device, ERROR,
1176                 "%s: INITPHASE1 failed on link %d\n",
1177                 __FUNCTION__, link->linkNumber);
1178     }
1179 
1180     return status;
1181 }
1182 
1183 static NvU32
_nvswitch_get_num_vcs_lr10(nvswitch_device * device)1184 _nvswitch_get_num_vcs_lr10
1185 (
1186     nvswitch_device *device
1187 )
1188 {
1189     return NVSWITCH_NUM_VCS_LR10;
1190 }
1191 
1192 void
nvswitch_determine_platform_lr10(nvswitch_device * device)1193 nvswitch_determine_platform_lr10
1194 (
1195     nvswitch_device *device
1196 )
1197 {
1198     NvU32 value;
1199 
1200     //
1201     // Determine which model we are using SMC_BOOT_2 and OS query
1202     //
1203     value = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_2);
1204     device->is_emulation = FLD_TEST_DRF(_PSMC, _BOOT_2, _EMULATION, _YES, value);
1205 
1206     if (!IS_EMULATION(device))
1207     {
1208         // If we are not on fmodel, we must be on RTL sim or silicon
1209         if (FLD_TEST_DRF(_PSMC, _BOOT_2, _FMODEL, _YES, value))
1210         {
1211             device->is_fmodel = NV_TRUE;
1212         }
1213         else
1214         {
1215             device->is_rtlsim = NV_TRUE;
1216 
1217             // Let OS code finalize RTL sim vs silicon setting
1218             nvswitch_os_override_platform(device->os_handle, &device->is_rtlsim);
1219         }
1220     }
1221 
1222 #if defined(NVLINK_PRINT_ENABLED)
1223     {
1224         const char *build;
1225         const char *mode;
1226 
1227         build = "HW";
1228         if (IS_FMODEL(device))
1229             mode = "fmodel";
1230         else if (IS_RTLSIM(device))
1231             mode = "rtlsim";
1232         else if (IS_EMULATION(device))
1233             mode = "emulation";
1234         else
1235             mode = "silicon";
1236 
1237         NVSWITCH_PRINT(device, SETUP,
1238             "%s: build: %s platform: %s\n",
1239              __FUNCTION__, build, mode);
1240     }
1241 #endif // NVLINK_PRINT_ENABLED
1242 }
1243 
1244 static void
_nvswitch_portstat_reset_latency_counters(nvswitch_device * device)1245 _nvswitch_portstat_reset_latency_counters
1246 (
1247     nvswitch_device *device
1248 )
1249 {
1250     // Set SNAPONDEMAND from 0->1 to reset the counters
1251     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL,
1252         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
1253         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _ENABLE));
1254 
1255     // Set SNAPONDEMAND back to 0.
1256     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL,
1257         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
1258         DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE));
1259 }
1260 
1261 //
1262 // Data collector which runs on a background thread, collecting latency stats.
1263 //
1264 // The latency counters have a maximum window period of 3.299 seconds
1265 // (2^32 clk cycles). The counters reset after this period. So SW snaps
1266 // the bins and records latencies every 3 seconds. Setting SNAPONDEMAND from 0->1
1267 // snaps the  latency counters and updates them to PRI registers for
1268 // the SW to read. It then resets the counters to start collecting fresh latencies.
1269 //
1270 
void
nvswitch_internal_latency_bin_log_lr10
(
    nvswitch_device *device
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32 idx_nport;
    NvU32 idx_vc;
    NvBool vc_valid;
    NvU32 latency;
    NvU64 time_nsec;
    NvU32 link_type;    // Access or trunk link
    NvU64 last_visited_time_nsec;

    if (chip_device->latency_stats == NULL)
    {
        // Latency stat buffers not allocated yet
        return;
    }

    time_nsec = nvswitch_os_get_platform_time();
    last_visited_time_nsec = chip_device->latency_stats->last_visited_time_nsec;

    // Update last visited time
    chip_device->latency_stats->last_visited_time_nsec = time_nsec;

    // Compare time stamp and reset the counters if the snap is missed
    //
    // NOTE(review): '!IS_RTLSIM(device) || !IS_FMODEL(device)' is always true
    // (a device cannot be both rtlsim and fmodel), so this guard never skips
    // the check.  '&&' (i.e. "only check on real HW") may have been the
    // intent - confirm before changing.
    if (!IS_RTLSIM(device) || !IS_FMODEL(device))
    {
        if ((last_visited_time_nsec != 0) &&
            ((time_nsec - last_visited_time_nsec) > 3 * NVSWITCH_INTERVAL_1SEC_IN_NS))
        {
            // The 2^32-cycle HW window (~3.3s) has likely wrapped; the bins
            // are unreliable, so reset instead of accumulating garbage.
            NVSWITCH_PRINT(device, ERROR,
                "Latency metrics recording interval missed.  Resetting counters.\n");
            _nvswitch_portstat_reset_latency_counters(device);
            return;
        }
    }

    for (idx_nport=0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++)
    {
        if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, idx_nport))
        {
            continue;
        }

        // Setting SNAPONDEMAND from 0->1 snaps the latencies and resets the counters
        NVSWITCH_LINK_WR32_LR10(device, idx_nport, NPORT, _NPORT, _PORTSTAT_SNAP_CONTROL,
            DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
            DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _ENABLE));

        //
        // TODO: Check _STARTCOUNTER and don't log if counter not enabled.
        // Currently all counters are always enabled
        //

        // Link type (access vs trunk) determines which VCs carry traffic.
        link_type = NVSWITCH_LINK_RD32_LR10(device, idx_nport, NPORT, _NPORT, _CTRL);
        for (idx_vc = 0; idx_vc < NVSWITCH_NUM_VCS_LR10; idx_vc++)
        {
            vc_valid = NV_FALSE;

            // VC's CREQ0(0) and RSP0(5) are relevant on access links.
            if (FLD_TEST_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _ACCESSLINK, link_type) &&
                ((idx_vc == NV_NPORT_VC_MAPPING_CREQ0) ||
                (idx_vc == NV_NPORT_VC_MAPPING_RSP0)))
            {
                vc_valid = NV_TRUE;
            }

            // VC's CREQ0(0), RSP0(5), CREQ1(6) and RSP1(7) are relevant on trunk links.
            if (FLD_TEST_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _TRUNKLINK, link_type) &&
                ((idx_vc == NV_NPORT_VC_MAPPING_CREQ0)  ||
                 (idx_vc == NV_NPORT_VC_MAPPING_RSP0)   ||
                 (idx_vc == NV_NPORT_VC_MAPPING_CREQ1)  ||
                 (idx_vc == NV_NPORT_VC_MAPPING_RSP1)))
            {
                vc_valid = NV_TRUE;
            }

            // If the VC is not being used, skip reading it
            if (!vc_valid)
            {
                continue;
            }

            // Accumulate the snapped per-bin counts into the SW running totals.
            latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _LOW, idx_vc);
            chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].low += latency;

            latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _MEDIUM, idx_vc);
            chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].medium += latency;

            latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _HIGH, idx_vc);
            chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].high += latency;

            latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _PANIC, idx_vc);
            chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].panic += latency;

            latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _PACKET, _COUNT, idx_vc);
            chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].count += latency;

            // Note the time of this snap
            chip_device->latency_stats->latency[idx_vc].last_read_time_nsec = time_nsec;
            chip_device->latency_stats->latency[idx_vc].count++;
        }

        // Disable SNAPONDEMAND after fetching the latencies
        NVSWITCH_LINK_WR32_LR10(device, idx_nport, NPORT, _NPORT, _PORTSTAT_SNAP_CONTROL,
            DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
            DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE));
    }
}
1383 
void
nvswitch_ecc_writeback_task_lr10
(
    nvswitch_device *device
)
{
    // Intentionally empty on LR10: there is no ECC writeback work to do,
    // but the HAL entry point must still exist.
}
1391 
/*
 * Write numEntries 64-bit ganged-link-table entries into the ROUTE ganged
 * RAM on all NPORTs (broadcast), starting at firstIndex, using the
 * auto-incrementing table-address register.
 */
void
nvswitch_set_ganged_link_table_lr10
(
    nvswitch_device *device,
    NvU32            firstIndex,
    NvU64           *ganged_link_table,
    NvU32            numEntries
)
{
    NvU32 i;

    // Set the starting RAM index; AUTO_INCR advances it after each entry.
    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _REG_TABLE_ADDRESS,
        DRF_NUM(_ROUTE, _REG_TABLE_ADDRESS, _INDEX, firstIndex) |
        DRF_NUM(_ROUTE, _REG_TABLE_ADDRESS, _AUTO_INCR, 1));

    for (i = 0; i < numEntries; i++)
    {
        // Low 32 bits of the 64-bit entry.
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _REG_TABLE_DATA0,
            NvU64_LO32(ganged_link_table[i]));

        // NOTE(review): the high 32 bits are also written to _REG_TABLE_DATA0.
        // If the HW does not internally sequence paired DATA0 writes, this
        // likely should target _REG_TABLE_DATA1 - confirm against the ROUTE
        // register manual before changing.
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _REG_TABLE_DATA0,
            NvU64_HI32(ganged_link_table[i]));
    }
}
1416 
/*
 * Build and program the ganged link routing table with a uniform spray
 * pattern, then stash the table in chip_device for later reference.
 * Returns -NVL_NO_MEM if the temporary table cannot be allocated.
 */
static NvlStatus
_nvswitch_init_ganged_link_routing
(
    nvswitch_device *device
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32        gang_index, gang_size;
    NvU64        gang_entry;
    NvU32        block_index;
    NvU32        block_count = 16;      // one section per gang size
    NvU32        glt_entries = 16;      // 4-bit link fields per 64-bit entry
    NvU32        glt_size = ROUTE_GANG_TABLE_SIZE / 2;   // entries are 64-bit
    NvU64        *ganged_link_table = NULL;
    NvU32        block_size = ROUTE_GANG_TABLE_SIZE / block_count;
    NvU32        table_index = 0;
    NvU32        i;

    //
    // Refer to switch IAS 11.2 Figure 82. Limerock Ganged RAM Table Format
    //
    // The ganged link routing table is composed of 512 entries divided into 16 sections.
    // Each section specifies how requests should be routed through the ganged links.
    // Each 32-bit entry is composed of eight 4-bit fields specifying the set of of links
    // to distribute through.  More complex spray patterns could be constructed, but for
    // now initialize it with a uniform distribution pattern.
    //
    // The ganged link routing table will be loaded with following values:
    // Typically the first section would be filled with (0,1,2,3,4,5,6,7), (8,9,10,11,12,13,14,15),...
    // Typically the second section would be filled with (0,0,0,0,0,0,0,0), (0,0,0,0,0,0,0,0),...
    // Typically the third section would be filled with (0,1,0,1,0,1,0,1), (0,1,0,1,0,1,0,1),...
    // Typically the third section would be filled with (0,1,2,0,1,2,0,1), (2,0,1,2,0,1,2,0),...
    //  :
    // The last section would typically be filled with (0,1,2,3,4,5,6,7), (8,9,10,11,12,13,14,0),...
    //
    // Refer table 20: Definition of size bits used with Ganged Link Number Table.
    // Note that section 0 corresponds with 16 ganged links.  Section N corresponds with
    // N ganged links.
    //

    //Alloc memory for Ganged Link Table
    ganged_link_table = nvswitch_os_malloc(glt_size * sizeof(gang_entry));
    if (ganged_link_table == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Failed to allocate memory for GLT!!\n");
        return -NVL_NO_MEM;
    }

    for (block_index = 0; block_index < block_count; block_index++)
    {
        // Section 0 encodes a 16-link gang; section N encodes N links.
        gang_size = ((block_index==0) ? 16 : block_index);

        for (gang_index = 0; gang_index < block_size/2; gang_index++)
        {
            gang_entry = 0;
            NVSWITCH_ASSERT(table_index < glt_size);

            // Pack 16 round-robin link selectors into one 64-bit entry.
            for (i = 0; i < glt_entries; i++)
            {
                gang_entry |=
                    DRF_NUM64(_ROUTE, _REG_TABLE_DATA0, _GLX(i), (16 * gang_index + i) % gang_size);
            }

            ganged_link_table[table_index++] = gang_entry;
        }
    }

    nvswitch_set_ganged_link_table_lr10(device, 0, ganged_link_table, glt_size);

    // Ownership of the table transfers to chip_device; freed at teardown.
    chip_device->ganged_link_table = ganged_link_table;

    return NVL_SUCCESS;
}
1491 
/*
 * Take the IP wrapper modules (NXBAR, NVLIPT, NPG) out of reset and
 * reconcile the enable masks with what a hypervisor may have restricted.
 * Links whose NVLW or NPG unit ends up disabled are marked invalid.
 */
static NvlStatus
nvswitch_initialize_ip_wrappers_lr10
(
    nvswitch_device *device
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32 engine_enable_mask;
    NvU32 engine_disable_mask;
    NvU32 i, j;
    NvU32 idx_link;

    //
    // Now that software knows the devices and addresses, it must take all
    // the wrapper modules out of reset.  It does this by writing to the
    // PMC module enable registers.
    //

// Temporary - bug 2069764
//    NVSWITCH_REG_WR32(device, _PSMC, _ENABLE,
//        DRF_DEF(_PSMC, _ENABLE, _SAW, _ENABLE) |
//        DRF_DEF(_PSMC, _ENABLE, _PRIV_RING, _ENABLE) |
//        DRF_DEF(_PSMC, _ENABLE, _PERFMON, _ENABLE));

    NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE,
        DRF_DEF(_NVLSAW_NVSPMC, _ENABLE, _NXBAR, _ENABLE));

    //
    // At this point the list of discovered devices has been cross-referenced
    // with the ROM configuration, platform configuration, and regkey override.
    // The NVLIPT & NPORT enable filtering done here further updates the MMIO
    // information based on KVM.
    //

    // Enable the NVLIPT units that have been discovered
    engine_enable_mask = 0;
    for (i = 0; i < NVSWITCH_ENG_COUNT(device, NVLW, ); i++)
    {
        if (NVSWITCH_ENG_IS_VALID(device, NVLW, i))
        {
            engine_enable_mask |= NVBIT(i);
        }
    }
    NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NVLIPT, engine_enable_mask);

    //
    // In bare metal we write ENABLE_NVLIPT to enable the units that aren't
    // disabled by ROM configuration, platform configuration, or regkey override.
    // If we are running inside a VM, the hypervisor has already set ENABLE_NVLIPT
    // and write protected it.  Reading ENABLE_NVLIPT tells us which units we
    // are allowed to use inside this VM.
    //
    engine_disable_mask = ~NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NVLIPT);
    if (engine_enable_mask != ~engine_disable_mask)
    {
        NVSWITCH_PRINT(device, WARN,
            "NV_NVLSAW_NVSPMC_ENABLE_NVLIPT mismatch: wrote 0x%x, read 0x%x\n",
            engine_enable_mask,
            ~engine_disable_mask);
        NVSWITCH_PRINT(device, WARN,
            "Ignoring NV_NVLSAW_NVSPMC_ENABLE_NVLIPT readback until supported on fmodel\n");
        engine_disable_mask = ~engine_enable_mask;
    }
    // Restrict to the bits covering actual NVLW engines, then invalidate
    // every NVLW unit (and its links) the platform disallows.
    engine_disable_mask &= NVBIT(NVSWITCH_ENG_COUNT(device, NVLW, )) - 1;
    FOR_EACH_INDEX_IN_MASK(32, i, engine_disable_mask)
    {
        chip_device->engNVLW[i].valid = NV_FALSE;
        for (j = 0; j < NVSWITCH_LINKS_PER_NVLW; j++)
        {
            idx_link = i * NVSWITCH_LINKS_PER_NVLW + j;
            if (idx_link < NVSWITCH_LINK_COUNT(device))
            {
                device->link[idx_link].valid = NV_FALSE;
                //
                // TODO: This invalidate used to also invalidate all the
                // associated NVLW engFOO units. This is probably not necessary
                // but code that bypasses the link valid check might touch the
                // underlying units when they are not supposed to.
                //
            }
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    // Enable the NPORT units that have been discovered
    engine_enable_mask = 0;
    for (i = 0; i < NVSWITCH_ENG_COUNT(device, NPG, ); i++)
    {
        if (NVSWITCH_ENG_IS_VALID(device, NPG, i))
        {
            engine_enable_mask |= NVBIT(i);
        }
    }
    NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NPG, engine_enable_mask);

    //
    // In bare metal we write ENABLE_NPG to enable the units that aren't
    // disabled by ROM configuration, platform configuration, or regkey override.
    // If we are running inside a VM, the hypervisor has already set ENABLE_NPG
    // and write protected it.  Reading ENABLE_NPG tells us which units we
    // are allowed to use inside this VM.
    //
    engine_disable_mask = ~NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NPG);
    if (engine_enable_mask != ~engine_disable_mask)
    {
        NVSWITCH_PRINT(device, WARN,
            "NV_NVLSAW_NVSPMC_ENABLE_NPG mismatch: wrote 0x%x, read 0x%x\n",
            engine_enable_mask,
            ~engine_disable_mask);
        NVSWITCH_PRINT(device, WARN,
            "Ignoring NV_NVLSAW_NVSPMC_ENABLE_NPG readback until supported on fmodel\n");
        engine_disable_mask = ~engine_enable_mask;
    }
    // Same invalidation pass for disallowed NPG units and their links.
    engine_disable_mask &= NVBIT(NVSWITCH_ENG_COUNT(device, NPG, )) - 1;
    FOR_EACH_INDEX_IN_MASK(32, i, engine_disable_mask)
    {
        chip_device->engNPG[i].valid = NV_FALSE;
        for (j = 0; j < NVSWITCH_LINKS_PER_NPG; j++)
        {
            idx_link = i * NVSWITCH_LINKS_PER_NPG + j;

            if (idx_link < NVSWITCH_LINK_COUNT(device))
            {
                device->link[idx_link].valid = NV_FALSE;
                //
                // TODO: This invalidate used to also invalidate all the
                // associated NPG engFOO units. This is probably not necessary
                // but code that bypasses the link valid check might touch the
                // underlying units when they are not supposed to.
                //
            }
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}
1629 
1630 //
1631 // Bring units out of warm reset on boot.  Used by driver load.
1632 //
1633 void
nvswitch_init_warm_reset_lr10(nvswitch_device * device)1634 nvswitch_init_warm_reset_lr10
1635 (
1636     nvswitch_device *device
1637 )
1638 {
1639     NvU32 idx_npg;
1640     NvU32 idx_nport;
1641     NvU32 nport_mask;
1642     NvU32 nport_disable = 0;
1643 
1644 #if defined(NV_NPG_WARMRESET_NPORTDISABLE)
1645     nport_disable = DRF_NUM(_NPG, _WARMRESET, _NPORTDISABLE, ~nport_mask);
1646 #endif
1647 
1648     //
1649     // Walk the NPGs and build the mask of extant NPORTs
1650     //
1651     for (idx_npg = 0; idx_npg < NVSWITCH_ENG_COUNT(device, NPG, ); idx_npg++)
1652     {
1653         if (NVSWITCH_ENG_IS_VALID(device, NPG, idx_npg))
1654         {
1655             nport_mask = 0;
1656             for (idx_nport = 0; idx_nport < NVSWITCH_NPORT_PER_NPG; idx_nport++)
1657             {
1658                 nport_mask |=
1659                     (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_npg*NVSWITCH_NPORT_PER_NPG + idx_nport) ?
1660                     NVBIT(idx_nport) : 0x0);
1661             }
1662 
1663             NVSWITCH_NPG_WR32_LR10(device, idx_npg,
1664                 _NPG, _WARMRESET,
1665                 nport_disable |
1666                 DRF_NUM(_NPG, _WARMRESET, _NPORTWARMRESET, nport_mask));
1667         }
1668     }
1669 }
1670 
1671 /*
1672  * CTRL_NVSWITCH_SET_REMAP_POLICY
1673  */
1674 
1675 NvlStatus
nvswitch_get_remap_table_selector_lr10(nvswitch_device * device,NVSWITCH_TABLE_SELECT_REMAP table_selector,NvU32 * remap_ram_sel)1676 nvswitch_get_remap_table_selector_lr10
1677 (
1678     nvswitch_device *device,
1679     NVSWITCH_TABLE_SELECT_REMAP table_selector,
1680     NvU32 *remap_ram_sel
1681 )
1682 {
1683     NvU32 ram_sel = 0;
1684 
1685     switch (table_selector)
1686     {
1687         case NVSWITCH_TABLE_SELECT_REMAP_PRIMARY:
1688             ram_sel = NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM;
1689             break;
1690         default:
1691             // Unsupported remap table selector
1692             return -NVL_ERR_NOT_SUPPORTED;
1693             break;
1694     }
1695 
1696     if (remap_ram_sel)
1697     {
1698         *remap_ram_sel = ram_sel;
1699     }
1700 
1701     return NVL_SUCCESS;
1702 }
1703 
1704 NvU32
nvswitch_get_ingress_ram_size_lr10(nvswitch_device * device,NvU32 ingress_ram_selector)1705 nvswitch_get_ingress_ram_size_lr10
1706 (
1707     nvswitch_device *device,
1708     NvU32 ingress_ram_selector      // NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECT*
1709 )
1710 {
1711     NvU32 ram_size = 0;
1712 
1713     switch (ingress_ram_selector)
1714     {
1715         case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM:
1716             ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_REMAPTAB_DEPTH + 1;
1717             break;
1718         case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM:
1719             ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RID_TAB_DEPTH + 1;
1720             break;
1721         case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM:
1722             ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RLAN_TAB_DEPTH + 1;
1723             break;
1724         default:
1725             // Unsupported ingress RAM selector
1726             break;
1727     }
1728 
1729     return ram_size;
1730 }
1731 
//
// Program 'numEntries' REMAP policy entries into the ingress REMAP policy
// RAM of NPORT 'portNum', starting at RAM index 'firstIndex'.
//
// Assumes the caller has already range-checked the index window and every
// entry field against the hardware field widths (see
// nvswitch_ctrl_set_remap_policy_lr10).
//
static void
_nvswitch_set_remap_policy_lr10
(
    nvswitch_device *device,
    NvU32 portNum,
    NvU32 firstIndex,
    NvU32 numEntries,
    NVSWITCH_REMAP_POLICY_ENTRY *remap_policy
)
{
    NvU32 i;
    NvU32 remap_address;
    NvU32 address_offset;
    NvU32 address_base;
    NvU32 address_limit;

    //
    // Select the REMAP policy RAM at 'firstIndex' with auto-increment
    // enabled, so the RAM address advances after each entry and the address
    // register only needs to be programmed once.
    //
    NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    for (i = 0; i < numEntries; i++)
    {
        // Set each field if enabled, else set it to 0.
        // Narrow the 64-bit physical addresses to the hardware-sized fields.
        remap_address = DRF_VAL64(_INGRESS, _REMAP, _ADDR_PHYS_LR10, remap_policy[i].address);
        address_offset = DRF_VAL64(_INGRESS, _REMAP, _ADR_OFFSET_PHYS_LR10, remap_policy[i].addressOffset);
        address_base = DRF_VAL64(_INGRESS, _REMAP, _ADR_BASE_PHYS_LR10, remap_policy[i].addressBase);
        address_limit = DRF_VAL64(_INGRESS, _REMAP, _ADR_LIMIT_PHYS_LR10, remap_policy[i].addressLimit);

        // DATA1..DATA4 stage the entry; they do not advance the RAM address.
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA1,
            DRF_NUM(_INGRESS, _REMAPTABDATA1, _REQCTXT_MSK, remap_policy[i].reqCtxMask) |
            DRF_NUM(_INGRESS, _REMAPTABDATA1, _REQCTXT_CHK, remap_policy[i].reqCtxChk));
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA2,
            DRF_NUM(_INGRESS, _REMAPTABDATA2, _REQCTXT_REP, remap_policy[i].reqCtxRep) |
            DRF_NUM(_INGRESS, _REMAPTABDATA2, _ADR_OFFSET, address_offset));
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA3,
            DRF_NUM(_INGRESS, _REMAPTABDATA3, _ADR_BASE, address_base) |
            DRF_NUM(_INGRESS, _REMAPTABDATA3, _ADR_LIMIT, address_limit));
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA4,
            DRF_NUM(_INGRESS, _REMAPTABDATA4, _TGTID, remap_policy[i].targetId) |
            DRF_NUM(_INGRESS, _REMAPTABDATA4, _RFUNC, remap_policy[i].flags));

        // Write last and auto-increment
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA0,
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _RMAP_ADDR, remap_address) |
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _IRL_SEL, remap_policy[i].irlSelect) |
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _ACLVALID, remap_policy[i].entryValid));
    }
}
1781 
//
// CTRL_NVSWITCH_SET_REMAP_POLICY handler: program REMAP policy entries on
// one NPORT.
//
// Every entry is validated against the hardware field widths before any
// register is written, so an invalid request leaves the RAM untouched.
//
// Returns:
//   -NVL_BAD_ARGS           bad port, index window, or entry field
//   -NVL_ERR_NOT_SUPPORTED  table other than the primary REMAP table
//                           (the only one LR10 implements)
//   NVL_SUCCESS             entries programmed
//
NvlStatus
nvswitch_ctrl_set_remap_policy_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_REMAP_POLICY *p
)
{
    NvU32 i;
    NvU32 rfunc;
    NvU32 ram_size;
    NvlStatus retval = NVL_SUCCESS;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "NPORT port #%d not valid\n",
            p->portNum);
        return -NVL_BAD_ARGS;
    }

    if (p->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Remap table #%d not supported\n",
            p->tableSelect);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    // Reject index windows that fall outside the REMAP policy RAM.
    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM);
    if ((p->firstIndex >= ram_size) ||
        (p->numEntries > NVSWITCH_REMAP_POLICY_ENTRIES_MAX) ||
        (p->firstIndex + p->numEntries > ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "remapPolicy[%d..%d] overflows range %d..%d or size %d.\n",
            p->firstIndex, p->firstIndex + p->numEntries - 1,
            0, ram_size - 1,
            NVSWITCH_REMAP_POLICY_ENTRIES_MAX);
        return -NVL_BAD_ARGS;
    }

    // Validate every entry before touching the hardware.
    for (i = 0; i < p->numEntries; i++)
    {
        if (p->remapPolicy[i].targetId &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA4_TGTID))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].targetId 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].targetId,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA4_TGTID));
            return -NVL_BAD_ARGS;
        }

        if (p->remapPolicy[i].irlSelect &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA0_IRL_SEL))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].irlSelect 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].irlSelect,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA0_IRL_SEL));
            return -NVL_BAD_ARGS;
        }

        // Only the remap functions implemented on LR10 may be requested.
        rfunc = p->remapPolicy[i].flags &
            (
                NVSWITCH_REMAP_POLICY_FLAGS_REMAP_ADDR |
                NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_CHECK |
                NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_REPLACE |
                NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE |
                NVSWITCH_REMAP_POLICY_FLAGS_ADR_OFFSET
            );
        if (rfunc != p->remapPolicy[i].flags)
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].flags 0x%x has undefined flags (0x%x)\n",
                i, p->remapPolicy[i].flags,
                p->remapPolicy[i].flags ^ rfunc);
            return -NVL_BAD_ARGS;
        }

        // Validate that only bits 46:36 are used
        if (p->remapPolicy[i].address &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADDR_PHYS_LR10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].address 0x%llx & ~0x%llx != 0\n",
                i, p->remapPolicy[i].address,
                DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADDR_PHYS_LR10));
            return -NVL_BAD_ARGS;
        }

        if (p->remapPolicy[i].reqCtxMask &
           ~DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].reqCtxMask 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].reqCtxMask,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK));
            return -NVL_BAD_ARGS;
        }

        if (p->remapPolicy[i].reqCtxChk &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].reqCtxChk 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].reqCtxChk,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK));
            return -NVL_BAD_ARGS;
        }

        if (p->remapPolicy[i].reqCtxRep &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA2_REQCTXT_REP))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].reqCtxRep 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].reqCtxRep,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA2_REQCTXT_REP));
            return -NVL_BAD_ARGS;
        }

        // An offset is only meaningful relative to a base.
        if ((p->remapPolicy[i].flags & NVSWITCH_REMAP_POLICY_FLAGS_ADR_OFFSET) &&
            !(p->remapPolicy[i].flags & NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].flags: _FLAGS_ADR_OFFSET should not be set if "
                "_FLAGS_ADR_BASE is not set\n",
                i);
            return -NVL_BAD_ARGS;
        }

        // Validate that only bits 35:20 are used
        if (p->remapPolicy[i].addressBase &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_BASE_PHYS_LR10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].addressBase 0x%llx & ~0x%llx != 0\n",
                i, p->remapPolicy[i].addressBase,
                DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_BASE_PHYS_LR10));
            return -NVL_BAD_ARGS;
        }

        // Validate that only bits 35:20 are used
        if (p->remapPolicy[i].addressLimit &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LR10))
        {
            NVSWITCH_PRINT(device, ERROR,
                 "remapPolicy[%d].addressLimit 0x%llx & ~0x%llx != 0\n",
                 i, p->remapPolicy[i].addressLimit,
                 DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LR10));
            return -NVL_BAD_ARGS;
        }

        // Validate base & limit describe a region
        if (p->remapPolicy[i].addressBase > p->remapPolicy[i].addressLimit)
        {
            NVSWITCH_PRINT(device, ERROR,
                 "remapPolicy[%d].addressBase/Limit invalid: 0x%llx > 0x%llx\n",
                 i, p->remapPolicy[i].addressBase, p->remapPolicy[i].addressLimit);
            return -NVL_BAD_ARGS;
        }

        // Validate that only bits 35:20 are used
        if (p->remapPolicy[i].addressOffset &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].addressOffset 0x%llx & ~0x%llx != 0\n",
                i, p->remapPolicy[i].addressOffset,
                DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10));
            return -NVL_BAD_ARGS;
        }

        // Validate limit - base + offset doesn't overflow 64G
        if ((p->remapPolicy[i].addressLimit - p->remapPolicy[i].addressBase +
                p->remapPolicy[i].addressOffset) &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].addressLimit 0x%llx - addressBase 0x%llx + "
                "addressOffset 0x%llx overflows 64GB\n",
                i, p->remapPolicy[i].addressLimit, p->remapPolicy[i].addressBase,
                p->remapPolicy[i].addressOffset);
            return -NVL_BAD_ARGS;
        }
    }

    // All entries validated; commit them to the RAM.
    _nvswitch_set_remap_policy_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->remapPolicy);

    return retval;
}
1973 
1974 /*
1975  * CTRL_NVSWITCH_GET_REMAP_POLICY
1976  */
1977 
// Number of _INGRESS_REMAPTABDATA* registers that make up one REMAP entry
#define NVSWITCH_NUM_REMAP_POLICY_REGS_LR10 5
1979 
//
// CTRL_NVSWITCH_GET_REMAP_POLICY handler: read back REMAP policy entries
// from one NPORT, starting at params->firstIndex.
//
// All-zero RAM entries are skipped, so the output array is a compacted list
// of up to NVSWITCH_REMAP_POLICY_ENTRIES_MAX populated entries.  On return,
// params->numEntries is the count returned and params->nextIndex is the
// first RAM index not yet scanned (callers resume from there to page
// through the whole table).
//
NvlStatus
nvswitch_ctrl_get_remap_policy_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_REMAP_POLICY_PARAMS *params
)
{
    NVSWITCH_REMAP_POLICY_ENTRY *remap_policy;
    NvU32 remap_policy_data[NVSWITCH_NUM_REMAP_POLICY_REGS_LR10]; // 5 REMAP tables
    NvU32 table_index;
    NvU32 remap_count;
    NvU32 remap_address;
    NvU32 address_offset;
    NvU32 address_base;
    NvU32 address_limit;
    NvU32 ram_size;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "NPORT port #%d not valid\n",
            params->portNum);
        return -NVL_BAD_ARGS;
    }

    // LR10 only implements the primary REMAP table.
    if (params->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Remap table #%d not supported\n",
            params->tableSelect);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM);
    if ((params->firstIndex >= ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: remapPolicy first index %d out of range[%d..%d].\n",
            __FUNCTION__, params->firstIndex, 0, ram_size - 1);
        return -NVL_BAD_ARGS;
    }

    // Clear the whole output array; unreturned slots read back as zero.
    nvswitch_os_memset(params->entry, 0, (NVSWITCH_REMAP_POLICY_ENTRIES_MAX *
        sizeof(NVSWITCH_REMAP_POLICY_ENTRY)));

    table_index = params->firstIndex;
    remap_policy = params->entry;
    remap_count = 0;

    /* set table offset */
    // NOTE(review): _AUTO_INCR=1 is assumed to advance the RAM address as
    // the data registers are read each iteration -- confirm against the
    // INGRESS manuals.
    NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    // Scan until the output array is full or the RAM is exhausted.
    while (remap_count < NVSWITCH_REMAP_POLICY_ENTRIES_MAX &&
        table_index < ram_size)
    {
        remap_policy_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA0);
        remap_policy_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA1);
        remap_policy_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA2);
        remap_policy_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA3);
        remap_policy_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA4);

        /* add to remap_entries list if nonzero */
        if (remap_policy_data[0] || remap_policy_data[1] || remap_policy_data[2] ||
            remap_policy_data[3] || remap_policy_data[4])
        {
            remap_policy[remap_count].irlSelect =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _IRL_SEL, remap_policy_data[0]);

            remap_policy[remap_count].entryValid =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _ACLVALID, remap_policy_data[0]);

            // Widen the hardware address fields back to 64-bit physical
            // addresses (inverse of _nvswitch_set_remap_policy_lr10).
            remap_address =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _RMAP_ADDR, remap_policy_data[0]);

            remap_policy[remap_count].address =
                DRF_NUM64(_INGRESS, _REMAP, _ADDR_PHYS_LR10, remap_address);

            remap_policy[remap_count].reqCtxMask =
                DRF_VAL(_INGRESS, _REMAPTABDATA1, _REQCTXT_MSK, remap_policy_data[1]);

            remap_policy[remap_count].reqCtxChk =
                DRF_VAL(_INGRESS, _REMAPTABDATA1, _REQCTXT_CHK, remap_policy_data[1]);

            remap_policy[remap_count].reqCtxRep =
                DRF_VAL(_INGRESS, _REMAPTABDATA2, _REQCTXT_REP, remap_policy_data[2]);

            address_offset =
                DRF_VAL(_INGRESS, _REMAPTABDATA2, _ADR_OFFSET, remap_policy_data[2]);

            remap_policy[remap_count].addressOffset =
                DRF_NUM64(_INGRESS, _REMAP, _ADR_OFFSET_PHYS_LR10, address_offset);

            address_base =
                DRF_VAL(_INGRESS, _REMAPTABDATA3, _ADR_BASE, remap_policy_data[3]);

            remap_policy[remap_count].addressBase =
                DRF_NUM64(_INGRESS, _REMAP, _ADR_BASE_PHYS_LR10, address_base);

            address_limit =
                DRF_VAL(_INGRESS, _REMAPTABDATA3, _ADR_LIMIT, remap_policy_data[3]);

            remap_policy[remap_count].addressLimit =
                DRF_NUM64(_INGRESS, _REMAP, _ADR_LIMIT_PHYS_LR10, address_limit);

            remap_policy[remap_count].targetId =
                DRF_VAL(_INGRESS, _REMAPTABDATA4, _TGTID, remap_policy_data[4]);

            remap_policy[remap_count].flags =
                DRF_VAL(_INGRESS, _REMAPTABDATA4, _RFUNC, remap_policy_data[4]);

            remap_count++;
        }

        table_index++;
    }

    params->nextIndex = table_index;
    params->numEntries = remap_count;

    return NVL_SUCCESS;
}
2104 
2105 /*
2106  * CTRL_NVSWITCH_SET_REMAP_POLICY_VALID
2107  */
//
// CTRL_NVSWITCH_SET_REMAP_POLICY_VALID handler: update only the entry-valid
// (ACLVALID) bit of existing REMAP policy entries, leaving every other
// field untouched via a read-modify-write of each entry.
//
NvlStatus
nvswitch_ctrl_set_remap_policy_valid_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_REMAP_POLICY_VALID *p
)
{
    NvU32 remap_ram;
    NvU32 ram_address = p->firstIndex;
    NvU32 remap_policy_data[NVSWITCH_NUM_REMAP_POLICY_REGS_LR10]; // 5 REMAP tables
    NvU32 i;
    NvU32 ram_size;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: NPORT port #%d not valid\n",
            __FUNCTION__, p->portNum);
        return -NVL_BAD_ARGS;
    }

    // LR10 only implements the primary REMAP table.
    if (p->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Remap table #%d not supported\n",
            p->tableSelect);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    // Reject index windows that fall outside the REMAP policy RAM.
    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM);
    if ((p->firstIndex >= ram_size) ||
        (p->numEntries > NVSWITCH_REMAP_POLICY_ENTRIES_MAX) ||
        (p->firstIndex + p->numEntries > ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: remapPolicy[%d..%d] overflows range %d..%d or size %d.\n",
            __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
            0, ram_size - 1,
            NVSWITCH_REMAP_POLICY_ENTRIES_MAX);
        return -NVL_BAD_ARGS;
    }

    // Select REMAPPOLICY RAM and disable Auto Increment: the RAM address is
    // reprogrammed explicitly each iteration because every entry is both
    // read and written.
    remap_ram =
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0);

    for (i = 0; i < p->numEntries; i++)
    {
        /* set the ram address */
        remap_ram = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, remap_ram);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, remap_ram);

        // Read the full entry so unrelated fields can be written back as-is.
        remap_policy_data[0] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA0);
        remap_policy_data[1] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA1);
        remap_policy_data[2] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA2);
        remap_policy_data[3] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA3);
        remap_policy_data[4] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA4);

        // Set valid bit in REMAPTABDATA0.
        remap_policy_data[0] = FLD_SET_DRF_NUM(_INGRESS, _REMAPTABDATA0, _ACLVALID, p->entryValid[i], remap_policy_data[0]);

        // Write back in reverse order so _REMAPTABDATA0 -- the register
        // carrying the valid bit -- is written last (matches the write
        // ordering used when programming entries).
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA4, remap_policy_data[4]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA3, remap_policy_data[3]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA2, remap_policy_data[2]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA1, remap_policy_data[1]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA0, remap_policy_data[0]);
    }

    return NVL_SUCCESS;
}
2179 
2180 //
2181 // Programming invalid entries to 0x3F causes Route block to detect an invalid port number
2182 // and flag a PRIV error to the FM. (See Table 14.RID RAM Programming, IAS 3.3.4)
2183 //
2184 
#define NVSWITCH_INVALID_PORT_VAL_LR10   0x3F
#define NVSWITCH_INVALID_VC_VAL_LR10     0x0

//
// Fetch the destination port / VC map for portList slot '_idx', or the
// invalid sentinel when the slot is beyond the entry's populated list.
// Arguments are fully parenthesized so that expression arguments
// (e.g. 'i + 1') expand with the intended precedence.
//
#define NVSWITCH_PORTLIST_PORT_LR10(_entry, _idx) \
    (((_idx) < (_entry).numEntries) ? (_entry).portList[(_idx)].destPortNum : NVSWITCH_INVALID_PORT_VAL_LR10)

#define NVSWITCH_PORTLIST_VC_LR10(_entry, _idx) \
    (((_idx) < (_entry).numEntries) ? (_entry).portList[(_idx)].vcMap : NVSWITCH_INVALID_VC_VAL_LR10)
2193 
2194 /*
2195  * CTRL_NVSWITCH_SET_ROUTING_ID
2196  */
2197 
//
// Program 'numEntries' routing ID (RID) entries into the ingress RID RAM of
// NPORT 'portNum', starting at RAM index 'firstIndex'.
//
// Port slots beyond routing_id[i].numEntries are filled with the
// invalid-port sentinel so the Route block flags a PRIV error if they are
// ever selected (see the comment above NVSWITCH_INVALID_PORT_VAL_LR10).
//
static void
_nvswitch_set_routing_id_lr10
(
    nvswitch_device *device,
    NvU32 portNum,
    NvU32 firstIndex,
    NvU32 numEntries,
    NVSWITCH_ROUTING_ID_ENTRY *routing_id
)
{
    NvU32 i;
    NvU32 rmod;

    //
    // Select the RID RAM at 'firstIndex' with auto-increment enabled, so the
    // RAM address advances after each entry and the address register only
    // needs to be programmed once.
    //
    NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    for (i = 0; i < numEntries; i++)
    {
        // DATA1..DATA5 stage port slots 3..15; DATA0 (slots 0..2) goes last.
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA1,
            DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT3,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 3)) |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE3, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 3))   |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT4,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 4)) |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE4, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 4))   |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT5,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 5)) |
            DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE5, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 5)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA2,
            DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT6,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 6)) |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE6, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 6))   |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT7,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 7)) |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE7, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 7))   |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT8,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 8)) |
            DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE8, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 8)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA3,
            DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT9,     NVSWITCH_PORTLIST_PORT_LR10(routing_id[i],  9)) |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE9,  NVSWITCH_PORTLIST_VC_LR10(routing_id[i],  9))   |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT10,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 10)) |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE10, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 10))   |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT11,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 11)) |
            DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE11, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 11)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA4,
            DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT12,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 12)) |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE12, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 12))   |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT13,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 13)) |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE13, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 13))   |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT14,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 14)) |
            DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE14, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 14)));

        // _RMOD field: bit 6 <- useRoutingLan, bit 9 <- enableIrlErrResponse.
        // NOTE(review): sub-bit meanings within _RMOD come from the INGRESS
        // manuals -- confirm there if these entries are extended.
        rmod =
            (routing_id[i].useRoutingLan ? NVBIT(6) : 0) |
            (routing_id[i].enableIrlErrResponse ? NVBIT(9) : 0);

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA5,
            DRF_NUM(_INGRESS, _RIDTABDATA5, _PORT15,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 15)) |
            DRF_NUM(_INGRESS, _RIDTABDATA5, _VC_MODE15, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 15))   |
            DRF_NUM(_INGRESS, _RIDTABDATA5, _RMOD,      rmod)                                           |
            DRF_NUM(_INGRESS, _RIDTABDATA5, _ACLVALID,  routing_id[i].entryValid));

        NVSWITCH_ASSERT(routing_id[i].numEntries <= 16);
        // Write last and auto-increment
        // A full group of 16 ports is encoded as _GSIZE = 0 (presumably the
        // field cannot hold the value 16 -- confirm against the manuals).
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA0,
            DRF_NUM(_INGRESS, _RIDTABDATA0, _GSIZE,
                (routing_id[i].numEntries == 16) ? 0x0 : routing_id[i].numEntries) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT0,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 0)) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE0, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 0))   |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT1,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 1)) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE1, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 1))   |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT2,    NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 2)) |
            DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE2, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 2)));
    }
}
2273 
// Number of _INGRESS_RIDTABDATA* registers that make up one RID entry
#define NVSWITCH_NUM_RIDTABDATA_REGS_LR10 6
2275 
2276 NvlStatus
nvswitch_ctrl_get_routing_id_lr10(nvswitch_device * device,NVSWITCH_GET_ROUTING_ID_PARAMS * params)2277 nvswitch_ctrl_get_routing_id_lr10
2278 (
2279     nvswitch_device *device,
2280     NVSWITCH_GET_ROUTING_ID_PARAMS *params
2281 )
2282 {
2283     NVSWITCH_ROUTING_ID_IDX_ENTRY *rid_entries;
2284     NvU32 table_index;
2285     NvU32 rid_tab_data[NVSWITCH_NUM_RIDTABDATA_REGS_LR10]; // 6 RID tables
2286     NvU32 rid_count;
2287     NvU32 rmod;
2288     NvU32 gsize;
2289     NvU32 ram_size;
2290 
2291     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
2292     {
2293         NVSWITCH_PRINT(device, ERROR,
2294             "%s: NPORT port #%d not valid\n",
2295             __FUNCTION__, params->portNum);
2296         return -NVL_BAD_ARGS;
2297     }
2298 
2299     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
2300     if (params->firstIndex >= ram_size)
2301     {
2302         NVSWITCH_PRINT(device, ERROR,
2303             "%s: routingId first index %d out of range[%d..%d].\n",
2304             __FUNCTION__, params->firstIndex, 0, ram_size - 1);
2305         return -NVL_BAD_ARGS;
2306     }
2307 
2308     nvswitch_os_memset(params->entries, 0, sizeof(params->entries));
2309 
2310     table_index = params->firstIndex;
2311     rid_entries = params->entries;
2312     rid_count = 0;
2313 
2314     /* set table offset */
2315     NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
2316         DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) |
2317         DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) |
2318         DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));
2319 
2320     while (rid_count < NVSWITCH_ROUTING_ID_ENTRIES_MAX &&
2321            table_index < ram_size)
2322     {
2323         rid_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA0);
2324         rid_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA1);
2325         rid_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA2);
2326         rid_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA3);
2327         rid_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA4);
2328         rid_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA5);
2329 
2330         /* add to rid_entries list if nonzero */
2331         if (rid_tab_data[0] || rid_tab_data[1] || rid_tab_data[2] ||
2332             rid_tab_data[3] || rid_tab_data[4] || rid_tab_data[5])
2333         {
2334             rid_entries[rid_count].entry.portList[0].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT0, rid_tab_data[0]);
2335             rid_entries[rid_count].entry.portList[0].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE0, rid_tab_data[0]);
2336 
2337             rid_entries[rid_count].entry.portList[1].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT1, rid_tab_data[0]);
2338             rid_entries[rid_count].entry.portList[1].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE1, rid_tab_data[0]);
2339 
2340             rid_entries[rid_count].entry.portList[2].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT2, rid_tab_data[0]);
2341             rid_entries[rid_count].entry.portList[2].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE2, rid_tab_data[0]);
2342 
2343             rid_entries[rid_count].entry.portList[3].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT3, rid_tab_data[1]);
2344             rid_entries[rid_count].entry.portList[3].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE3, rid_tab_data[1]);
2345 
2346             rid_entries[rid_count].entry.portList[4].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT4, rid_tab_data[1]);
2347             rid_entries[rid_count].entry.portList[4].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE4, rid_tab_data[1]);
2348 
2349             rid_entries[rid_count].entry.portList[5].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT5, rid_tab_data[1]);
2350             rid_entries[rid_count].entry.portList[5].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE5, rid_tab_data[1]);
2351 
2352             rid_entries[rid_count].entry.portList[6].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT6, rid_tab_data[2]);
2353             rid_entries[rid_count].entry.portList[6].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE6, rid_tab_data[2]);
2354 
2355             rid_entries[rid_count].entry.portList[7].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT7, rid_tab_data[2]);
2356             rid_entries[rid_count].entry.portList[7].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE7, rid_tab_data[2]);
2357 
2358             rid_entries[rid_count].entry.portList[8].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT8, rid_tab_data[2]);
2359             rid_entries[rid_count].entry.portList[8].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE8, rid_tab_data[2]);
2360 
2361             rid_entries[rid_count].entry.portList[9].destPortNum  = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT9, rid_tab_data[3]);
2362             rid_entries[rid_count].entry.portList[9].vcMap        = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE9, rid_tab_data[3]);
2363 
2364             rid_entries[rid_count].entry.portList[10].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT10, rid_tab_data[3]);
2365             rid_entries[rid_count].entry.portList[10].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE10, rid_tab_data[3]);
2366 
2367             rid_entries[rid_count].entry.portList[11].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT11, rid_tab_data[3]);
2368             rid_entries[rid_count].entry.portList[11].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE11, rid_tab_data[3]);
2369 
2370             rid_entries[rid_count].entry.portList[12].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT12, rid_tab_data[4]);
2371             rid_entries[rid_count].entry.portList[12].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE12, rid_tab_data[4]);
2372 
2373             rid_entries[rid_count].entry.portList[13].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT13, rid_tab_data[4]);
2374             rid_entries[rid_count].entry.portList[13].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE13, rid_tab_data[4]);
2375 
2376             rid_entries[rid_count].entry.portList[14].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT14, rid_tab_data[4]);
2377             rid_entries[rid_count].entry.portList[14].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE14, rid_tab_data[4]);
2378 
2379             rid_entries[rid_count].entry.portList[15].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA5, _PORT15, rid_tab_data[5]);
2380             rid_entries[rid_count].entry.portList[15].vcMap       = DRF_VAL(_INGRESS, _RIDTABDATA5, _VC_MODE15, rid_tab_data[5]);
2381             rid_entries[rid_count].entry.entryValid               = DRF_VAL(_INGRESS, _RIDTABDATA5, _ACLVALID, rid_tab_data[5]);
2382 
2383             rmod = DRF_VAL(_INGRESS, _RIDTABDATA5, _RMOD, rid_tab_data[5]);
2384             rid_entries[rid_count].entry.useRoutingLan = (NVBIT(6) & rmod) ? 1 : 0;
2385             rid_entries[rid_count].entry.enableIrlErrResponse = (NVBIT(9) & rmod) ? 1 : 0;
2386 
2387             // Gsize of 16 falls into the 0th entry of GLT region. The _GSIZE field must be mapped accordingly
2388             // to the number of port entries (See IAS, Table 20, Sect 3.4.2.2. Packet Routing).
2389             gsize = DRF_VAL(_INGRESS, _RIDTABDATA0, _GSIZE, rid_tab_data[0]);
2390             rid_entries[rid_count].entry.numEntries = ((gsize == 0) ? 16 : gsize);
2391 
2392             rid_entries[rid_count].idx = table_index;
2393             rid_count++;
2394         }
2395 
2396         table_index++;
2397     }
2398 
2399     params->nextIndex = table_index;
2400     params->numEntries = rid_count;
2401 
2402     return NVL_SUCCESS;
2403 }
2404 
2405 NvlStatus
nvswitch_ctrl_set_routing_id_valid_lr10(nvswitch_device * device,NVSWITCH_SET_ROUTING_ID_VALID * p)2406 nvswitch_ctrl_set_routing_id_valid_lr10
2407 (
2408     nvswitch_device *device,
2409     NVSWITCH_SET_ROUTING_ID_VALID *p
2410 )
2411 {
2412     NvU32 rid_ctrl;
2413     NvU32 rid_tab_data0;
2414     NvU32 rid_tab_data1;
2415     NvU32 rid_tab_data2;
2416     NvU32 rid_tab_data3;
2417     NvU32 rid_tab_data4;
2418     NvU32 rid_tab_data5;
2419     NvU32 ram_address = p->firstIndex;
2420     NvU32 i;
2421     NvU32 ram_size;
2422 
2423     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
2424     {
2425         NVSWITCH_PRINT(device, ERROR,
2426             "%s: NPORT port #%d not valid\n",
2427             __FUNCTION__, p->portNum);
2428         return -NVL_BAD_ARGS;
2429     }
2430 
2431     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
2432     if ((p->firstIndex >= ram_size) ||
2433         (p->numEntries > NVSWITCH_ROUTING_ID_ENTRIES_MAX) ||
2434         (p->firstIndex + p->numEntries > ram_size))
2435     {
2436         NVSWITCH_PRINT(device, ERROR,
2437             "%s: routingId[%d..%d] overflows range %d..%d or size %d.\n",
2438             __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
2439             0, ram_size - 1,
2440             NVSWITCH_ROUTING_ID_ENTRIES_MAX);
2441         return -NVL_BAD_ARGS;
2442     }
2443 
2444     // Select RID RAM and disable Auto Increment.
2445     rid_ctrl =
2446         DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) |
2447         DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0);
2448 
2449 
2450     for (i = 0; i < p->numEntries; i++)
2451     {
2452         /* set the ram address */
2453         rid_ctrl = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, rid_ctrl);
2454         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, rid_ctrl);
2455 
2456         rid_tab_data0 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA0);
2457         rid_tab_data1 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA1);
2458         rid_tab_data2 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA2);
2459         rid_tab_data3 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA3);
2460         rid_tab_data4 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA4);
2461         rid_tab_data5 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA5);
2462 
2463         // Set the valid bit in _RIDTABDATA5
2464         rid_tab_data5 = FLD_SET_DRF_NUM(_INGRESS, _RIDTABDATA5, _ACLVALID,
2465             p->entryValid[i], rid_tab_data5);
2466 
2467         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA1, rid_tab_data1);
2468         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA2, rid_tab_data2);
2469         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA3, rid_tab_data3);
2470         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA4, rid_tab_data4);
2471         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA5, rid_tab_data5);
2472         NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA0, rid_tab_data0);
2473     }
2474 
2475     return NVL_SUCCESS;
2476 }
2477 
2478 NvlStatus
nvswitch_ctrl_set_routing_id_lr10(nvswitch_device * device,NVSWITCH_SET_ROUTING_ID * p)2479 nvswitch_ctrl_set_routing_id_lr10
2480 (
2481     nvswitch_device *device,
2482     NVSWITCH_SET_ROUTING_ID *p
2483 )
2484 {
2485     NvU32 i, j;
2486     NvlStatus retval = NVL_SUCCESS;
2487     NvU32 ram_size;
2488 
2489     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
2490     {
2491         NVSWITCH_PRINT(device, ERROR,
2492             "NPORT port #%d not valid\n",
2493             p->portNum);
2494         return -NVL_BAD_ARGS;
2495     }
2496 
2497     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
2498     if ((p->firstIndex >= ram_size) ||
2499         (p->numEntries > NVSWITCH_ROUTING_ID_ENTRIES_MAX) ||
2500         (p->firstIndex + p->numEntries > ram_size))
2501     {
2502         NVSWITCH_PRINT(device, ERROR,
2503             "routingId[%d..%d] overflows range %d..%d or size %d.\n",
2504             p->firstIndex, p->firstIndex + p->numEntries - 1,
2505             0, ram_size - 1,
2506             NVSWITCH_ROUTING_ID_ENTRIES_MAX);
2507         return -NVL_BAD_ARGS;
2508     }
2509 
2510     for (i = 0; i < p->numEntries; i++)
2511     {
2512         if ((p->routingId[i].numEntries < 1) ||
2513             (p->routingId[i].numEntries > NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX))
2514         {
2515             NVSWITCH_PRINT(device, ERROR,
2516                 "routingId[%d].portList[] size %d overflows range %d..%d\n",
2517                 i, p->routingId[i].numEntries,
2518                 1, NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX);
2519             return -NVL_BAD_ARGS;
2520         }
2521 
2522         for (j = 0; j < p->routingId[i].numEntries; j++)
2523         {
2524             if (p->routingId[i].portList[j].vcMap > DRF_MASK(NV_INGRESS_RIDTABDATA0_VC_MODE0))
2525             {
2526                 NVSWITCH_PRINT(device, ERROR,
2527                     "routingId[%d].portList[%d] vcMap 0x%x out of valid range (0x%x..0x%x)\n",
2528                     i, j,
2529                     p->routingId[i].portList[j].vcMap,
2530                     0, DRF_MASK(NV_INGRESS_RIDTABDATA0_VC_MODE0));
2531                 return -NVL_BAD_ARGS;
2532             }
2533 
2534             if (p->routingId[i].portList[j].destPortNum > DRF_MASK(NV_INGRESS_RIDTABDATA0_PORT0))
2535             {
2536                 NVSWITCH_PRINT(device, ERROR,
2537                     "routingId[%d].portList[%d] destPortNum 0x%x out of valid range (0x%x..0x%x)\n",
2538                     i, j,
2539                     p->routingId[i].portList[j].destPortNum,
2540                     0, DRF_MASK(NV_INGRESS_RIDTABDATA0_PORT0));
2541                 return -NVL_BAD_ARGS;
2542             }
2543         }
2544     }
2545 
2546     _nvswitch_set_routing_id_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->routingId);
2547 
2548     return retval;
2549 }
2550 
2551 /*
2552  * CTRL_NVSWITCH_SET_ROUTING_LAN
2553  */
2554 
2555 //
2556 // Check the data field is present in the list.  Return either the data field
2557 // or default if not present.
2558 //
// Return _entry.portList[_idx]._field when _idx is a populated slot, else
// _default.  All macro parameters that can be expressions are parenthesized
// (CERT PRE01-C) so that e.g. conditional-expression arguments parse correctly.
#define NVSWITCH_PORTLIST_VALID_LR10(_entry, _idx, _field, _default) \
    (((_idx) < (_entry).numEntries) ? (_entry).portList[(_idx)]._field : (_default))
2561 
//
// Program 'numEntries' consecutive RLAN table entries starting at
// 'firstIndex' on the given port.  The RAM window is opened once with
// auto-increment enabled; for each entry, registers _RLANTABDATA1..5 are
// staged first and _RLANTABDATA0 is written last, which commits the entry
// and advances the RAM address.  Callers (nvswitch_ctrl_set_routing_lan_lr10)
// are responsible for range/field validation.
//
static void
_nvswitch_set_routing_lan_lr10
(
    nvswitch_device *device,
    NvU32 portNum,
    NvU32 firstIndex,
    NvU32 numEntries,
    NVSWITCH_ROUTING_LAN_ENTRY *routing_lan
)
{
    NvU32 i;

    // Select the RLAN RAM, seed the starting address, enable auto-increment.
    NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    for (i = 0; i < numEntries; i++)
    {
        //
        // NOTE: The GRP_SIZE field is 4-bits.  A subgroup is size 1 through 16
        // with encoding 0x0=16 and 0x1=1, ..., 0xF=15.
        // Programming of GRP_SIZE takes advantage of the inherent masking of
        // DRF_NUM to truncate 16 to 0.
        // See bug #3300673
        //
        // Port slots beyond the entry's numEntries are programmed with the
        // defaults supplied to NVSWITCH_PORTLIST_VALID_LR10 (select 0, size 1).

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA1,
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_3, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 3, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_3, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 3, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_4, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 4, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_4, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 4, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_5, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 5, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_5, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 5, groupSize, 1)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA2,
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_6, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 6, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_6, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 6, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_7, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 7, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_7, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 7, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_8, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 8, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_8, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 8, groupSize, 1)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA3,
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_9, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 9, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_9, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 9, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_10, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 10, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_10, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 10, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_11, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 11, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_11, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 11, groupSize, 1)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA4,
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_12, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 12, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_12, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 12, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_13, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 13, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_13, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 13, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_14, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 14, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_14, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 14, groupSize, 1)));

        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA5,
            DRF_NUM(_INGRESS, _RLANTABDATA5, _GRP_SEL_15, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 15, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA5, _GRP_SIZE_15, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 15, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA5, _ACLVALID,  routing_lan[i].entryValid));

        // Write last and auto-increment: _RLANTABDATA0 commits the staged
        // entry and bumps the RAM address to the next index.
        NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA0,
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_0, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 0, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_0, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 0, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_1, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 1, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_1, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 1, groupSize, 1)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_2, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 2, groupSelect, 0)) |
            DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_2, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 2, groupSize, 1)));
    }
}
2636 
2637 NvlStatus
nvswitch_ctrl_set_routing_lan_lr10(nvswitch_device * device,NVSWITCH_SET_ROUTING_LAN * p)2638 nvswitch_ctrl_set_routing_lan_lr10
2639 (
2640     nvswitch_device *device,
2641     NVSWITCH_SET_ROUTING_LAN *p
2642 )
2643 {
2644     NvU32 i, j;
2645     NvlStatus retval = NVL_SUCCESS;
2646     NvU32 ram_size;
2647 
2648     if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
2649     {
2650         NVSWITCH_PRINT(device, ERROR,
2651             "%s: NPORT port #%d not valid\n",
2652             __FUNCTION__, p->portNum);
2653         return -NVL_BAD_ARGS;
2654     }
2655 
2656     ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
2657     if ((p->firstIndex >= ram_size) ||
2658         (p->numEntries > NVSWITCH_ROUTING_LAN_ENTRIES_MAX) ||
2659         (p->firstIndex + p->numEntries > ram_size))
2660     {
2661         NVSWITCH_PRINT(device, ERROR,
2662             "%s: routingLan[%d..%d] overflows range %d..%d or size %d.\n",
2663             __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
2664             0, ram_size - 1,
2665             NVSWITCH_ROUTING_LAN_ENTRIES_MAX);
2666         return -NVL_BAD_ARGS;
2667     }
2668 
2669     for (i = 0; i < p->numEntries; i++)
2670     {
2671         if (p->routingLan[i].numEntries > NVSWITCH_ROUTING_LAN_GROUP_SEL_MAX)
2672         {
2673             NVSWITCH_PRINT(device, ERROR,
2674                 "%s: routingLan[%d].portList[] size %d overflows range %d..%d\n",
2675                 __FUNCTION__, i, p->routingLan[i].numEntries,
2676                 0, NVSWITCH_ROUTING_LAN_GROUP_SEL_MAX);
2677             return -NVL_BAD_ARGS;
2678         }
2679 
2680         for (j = 0; j < p->routingLan[i].numEntries; j++)
2681         {
2682             if (p->routingLan[i].portList[j].groupSelect > DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SEL_0))
2683             {
2684                 NVSWITCH_PRINT(device, ERROR,
2685                     "%s: routingLan[%d].portList[%d] groupSelect 0x%x out of valid range (0x%x..0x%x)\n",
2686                     __FUNCTION__, i, j,
2687                     p->routingLan[i].portList[j].groupSelect,
2688                     0, DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SEL_0));
2689                 return -NVL_BAD_ARGS;
2690             }
2691 
2692             if ((p->routingLan[i].portList[j].groupSize == 0) ||
2693                 (p->routingLan[i].portList[j].groupSize > DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SIZE_0) + 1))
2694             {
2695                 NVSWITCH_PRINT(device, ERROR,
2696                     "%s: routingLan[%d].portList[%d] groupSize 0x%x out of valid range (0x%x..0x%x)\n",
2697                     __FUNCTION__, i, j,
2698                     p->routingLan[i].portList[j].groupSize,
2699                     1, DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SIZE_0) + 1);
2700                 return -NVL_BAD_ARGS;
2701             }
2702         }
2703     }
2704 
2705     _nvswitch_set_routing_lan_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->routingLan);
2706 
2707     return retval;
2708 }
2709 
// Number of _RLANTABDATA* registers that make up one RLAN table entry.
#define NVSWITCH_NUM_RLANTABDATA_REGS_LR10 6

//
// Read back ingress routing LAN (RLAN) table entries starting at
// params->firstIndex, collecting up to NVSWITCH_ROUTING_LAN_ENTRIES_MAX
// non-zero entries.  All-zero entries are skipped but still advance
// table_index.  On return, params->numEntries holds the count collected and
// params->nextIndex the next RAM index to resume from.
//
NvlStatus
nvswitch_ctrl_get_routing_lan_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_ROUTING_LAN_PARAMS *params
)
{
    NVSWITCH_ROUTING_LAN_IDX_ENTRY *rlan_entries;
    NvU32 table_index;
    NvU32 rlan_tab_data[NVSWITCH_NUM_RLANTABDATA_REGS_LR10]; // 6 RLAN tables
    NvU32 rlan_count;
    NvU32 ram_size;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: NPORT port #%d not valid\n",
            __FUNCTION__, params->portNum);
        return -NVL_BAD_ARGS;
    }

    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
    if ((params->firstIndex >= ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: routingLan first index %d out of range[%d..%d].\n",
            __FUNCTION__, params->firstIndex, 0, ram_size - 1);
        return -NVL_BAD_ARGS;
    }

    nvswitch_os_memset(params->entries, 0, (NVSWITCH_ROUTING_LAN_ENTRIES_MAX *
        sizeof(NVSWITCH_ROUTING_LAN_IDX_ENTRY)));

    table_index = params->firstIndex;
    rlan_entries = params->entries;
    rlan_count = 0;

    /* set table offset */
    // Select the RLAN RAM with auto-increment so each _RLANTABDATA read
    // sequence below advances to the next entry automatically.
    NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM)   |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1));

    while (rlan_count < NVSWITCH_ROUTING_LAN_ENTRIES_MAX &&
           table_index < ram_size)
    {
        /* read one entry */
        rlan_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA0);
        rlan_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA1);
        rlan_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA2);
        rlan_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA3);
        rlan_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA4);
        rlan_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA5);

        /* add to rlan_entries list if nonzero */
        if (rlan_tab_data[0] || rlan_tab_data[1] || rlan_tab_data[2] ||
            rlan_tab_data[3] || rlan_tab_data[4] || rlan_tab_data[5])
        {
            // Decode all 16 port slots.  Each register packs three slots
            // (register 5 packs one slot plus the valid bit).  A GRP_SIZE
            // field value of 0 is the hardware encoding for 16 (see the
            // GRP_SIZE note in _nvswitch_set_routing_lan_lr10), so it is
            // decoded back to 16 here.
            rlan_entries[rlan_count].entry.portList[0].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_0, rlan_tab_data[0]);
            rlan_entries[rlan_count].entry.portList[0].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_0, rlan_tab_data[0]);
            if (rlan_entries[rlan_count].entry.portList[0].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[0].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[1].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_1, rlan_tab_data[0]);
            rlan_entries[rlan_count].entry.portList[1].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_1, rlan_tab_data[0]);
            if (rlan_entries[rlan_count].entry.portList[1].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[1].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[2].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_2, rlan_tab_data[0]);
            rlan_entries[rlan_count].entry.portList[2].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_2, rlan_tab_data[0]);
            if (rlan_entries[rlan_count].entry.portList[2].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[2].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[3].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_3, rlan_tab_data[1]);
            rlan_entries[rlan_count].entry.portList[3].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_3, rlan_tab_data[1]);
            if (rlan_entries[rlan_count].entry.portList[3].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[3].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[4].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_4, rlan_tab_data[1]);
            rlan_entries[rlan_count].entry.portList[4].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_4, rlan_tab_data[1]);
            if (rlan_entries[rlan_count].entry.portList[4].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[4].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[5].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_5, rlan_tab_data[1]);
            rlan_entries[rlan_count].entry.portList[5].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_5, rlan_tab_data[1]);
            if (rlan_entries[rlan_count].entry.portList[5].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[5].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[6].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_6, rlan_tab_data[2]);
            rlan_entries[rlan_count].entry.portList[6].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_6, rlan_tab_data[2]);
            if (rlan_entries[rlan_count].entry.portList[6].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[6].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[7].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_7, rlan_tab_data[2]);
            rlan_entries[rlan_count].entry.portList[7].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_7, rlan_tab_data[2]);
            if (rlan_entries[rlan_count].entry.portList[7].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[7].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[8].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_8, rlan_tab_data[2]);
            rlan_entries[rlan_count].entry.portList[8].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_8, rlan_tab_data[2]);
            if (rlan_entries[rlan_count].entry.portList[8].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[8].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[9].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_9, rlan_tab_data[3]);
            rlan_entries[rlan_count].entry.portList[9].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_9, rlan_tab_data[3]);
            if (rlan_entries[rlan_count].entry.portList[9].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[9].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[10].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_10, rlan_tab_data[3]);
            rlan_entries[rlan_count].entry.portList[10].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_10, rlan_tab_data[3]);
            if (rlan_entries[rlan_count].entry.portList[10].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[10].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[11].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_11, rlan_tab_data[3]);
            rlan_entries[rlan_count].entry.portList[11].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_11, rlan_tab_data[3]);
            if (rlan_entries[rlan_count].entry.portList[11].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[11].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[12].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_12, rlan_tab_data[4]);
            rlan_entries[rlan_count].entry.portList[12].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_12, rlan_tab_data[4]);
            if (rlan_entries[rlan_count].entry.portList[12].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[12].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[13].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_13, rlan_tab_data[4]);
            rlan_entries[rlan_count].entry.portList[13].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_13, rlan_tab_data[4]);
            if (rlan_entries[rlan_count].entry.portList[13].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[13].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[14].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_14, rlan_tab_data[4]);
            rlan_entries[rlan_count].entry.portList[14].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_14, rlan_tab_data[4]);
            if (rlan_entries[rlan_count].entry.portList[14].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[14].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.portList[15].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA5, _GRP_SEL_15, rlan_tab_data[5]);
            rlan_entries[rlan_count].entry.portList[15].groupSize   = DRF_VAL(_INGRESS, _RLANTABDATA5, _GRP_SIZE_15, rlan_tab_data[5]);
            if (rlan_entries[rlan_count].entry.portList[15].groupSize == 0)
            {
                rlan_entries[rlan_count].entry.portList[15].groupSize = 16;
            }

            rlan_entries[rlan_count].entry.entryValid               = DRF_VAL(_INGRESS, _RLANTABDATA5, _ACLVALID, rlan_tab_data[5]);
            // NOTE(review): uses NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX (the
            // RID constant) rather than an RLAN-specific one — presumably both
            // are 16 port slots; confirm against the API header.
            rlan_entries[rlan_count].entry.numEntries = NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX;
            rlan_entries[rlan_count].idx  = table_index;

            rlan_count++;
        }

        table_index++;
    }

    params->nextIndex  = table_index;
    params->numEntries = rlan_count;

    return NVL_SUCCESS;
}
2897 
//
// @brief Set or clear the entry-valid bit for a range of routing LAN (RLAN)
//        table entries on one NPORT, preserving the rest of each entry's
//        contents via a read-modify-write of the RLAN route RAM.
//
// @param[in] device   a reference to the device
// @param[in] p        firstIndex/numEntries select the RLAN RAM range;
//                     entryValid[i] supplies the per-entry valid bit
//
// @returns NVL_SUCCESS, or -NVL_BAD_ARGS if the port or index range is invalid
//
NvlStatus
nvswitch_ctrl_set_routing_lan_valid_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_ROUTING_LAN_VALID *p
)
{
    NvU32 rlan_ctrl;
    NvU32 rlan_tab_data[NVSWITCH_NUM_RLANTABDATA_REGS_LR10]; // 6 RLAN tables
    NvU32 ram_address = p->firstIndex;
    NvU32 i;
    NvU32 ram_size;

    // The requested port must have an NPORT engine on this device.
    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: NPORT port #%d not valid\n",
            __FUNCTION__, p->portNum);
        return -NVL_BAD_ARGS;
    }

    // Reject any range that falls outside the RLAN route RAM.
    ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
    if ((p->firstIndex >= ram_size) ||
        (p->numEntries > NVSWITCH_ROUTING_LAN_ENTRIES_MAX) ||
        (p->firstIndex + p->numEntries > ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: routingLan[%d..%d] overflows range %d..%d or size %d.\n",
            __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1,
            0, ram_size - 1,
            NVSWITCH_ROUTING_LAN_ENTRIES_MAX);
        return -NVL_BAD_ARGS;
    }

    // Select RLAN RAM and disable Auto Increment; the RAM address is
    // re-written explicitly on each loop iteration below.
    rlan_ctrl =
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0);

    for (i = 0; i < p->numEntries; i++)
    {
        /* set the RAM address */
        rlan_ctrl = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, rlan_ctrl);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, rlan_ctrl);

        // Read the full entry so that only the valid bit is modified.
        rlan_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA0);
        rlan_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA1);
        rlan_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA2);
        rlan_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA3);
        rlan_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA4);
        rlan_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA5);

        // Set the valid bit in _RLANTABDATA5
        rlan_tab_data[5] = FLD_SET_DRF_NUM(_INGRESS, _RLANTABDATA5, _ACLVALID,
            p->entryValid[i], rlan_tab_data[5]);

        // NOTE(review): _RLANTABDATA0 is deliberately written last;
        // presumably the DATA0 write commits the entry to the RAM --
        // confirm against the ingress IAS before reordering.
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA1, rlan_tab_data[1]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA2, rlan_tab_data[2]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA3, rlan_tab_data[3]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA4, rlan_tab_data[4]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA5, rlan_tab_data[5]);
        NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA0, rlan_tab_data[0]);
    }

    return NVL_SUCCESS;
}
2964 
2965 /*
2966  * @Brief : Send priv ring command and wait for completion
2967  *
2968  * @Description :
2969  *
2970  * @param[in] device        a reference to the device to initialize
2971  * @param[in] cmd           encoded priv ring command
2972  */
2973 NvlStatus
nvswitch_ring_master_cmd_lr10(nvswitch_device * device,NvU32 cmd)2974 nvswitch_ring_master_cmd_lr10
2975 (
2976     nvswitch_device *device,
2977     NvU32 cmd
2978 )
2979 {
2980     NvU32 value;
2981     NVSWITCH_TIMEOUT timeout;
2982     NvBool           keepPolling;
2983 
2984     NVSWITCH_REG_WR32(device, _PPRIV_MASTER, _RING_COMMAND, cmd);
2985 
2986     nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout);
2987     do
2988     {
2989         keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
2990 
2991         value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_COMMAND);
2992         if (FLD_TEST_DRF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _NO_CMD, value))
2993         {
2994             break;
2995         }
2996 
2997         nvswitch_os_sleep(1);
2998     }
2999     while (keepPolling);
3000 
3001     if (!FLD_TEST_DRF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _NO_CMD, value))
3002     {
3003         NVSWITCH_PRINT(device, ERROR,
3004             "%s: Timeout waiting for RING_COMMAND == NO_CMD (cmd=0x%x).\n",
3005             __FUNCTION__, cmd);
3006         return -NVL_INITIALIZATION_TOTAL_FAILURE;
3007     }
3008 
3009     return NVL_SUCCESS;
3010 }
3011 
3012 /*
3013  * @brief Process the information read from ROM tables and apply it to device
3014  * settings.
3015  *
3016  * @param[in] device    a reference to the device to query
3017  * @param[in] firmware  Information parsed from ROM tables
3018  */
3019 static void
_nvswitch_process_firmware_info_lr10(nvswitch_device * device,NVSWITCH_FIRMWARE * firmware)3020 _nvswitch_process_firmware_info_lr10
3021 (
3022     nvswitch_device *device,
3023     NVSWITCH_FIRMWARE *firmware
3024 )
3025 {
3026     NvU32 idx_link;
3027     NvU64 link_enable_mask;
3028 
3029     if (device->firmware.firmware_size == 0)
3030     {
3031         return;
3032     }
3033 
3034     if (device->firmware.nvlink.link_config_found)
3035     {
3036         link_enable_mask = ((NvU64)device->regkeys.link_enable_mask2 << 32 |
3037                             (NvU64)device->regkeys.link_enable_mask);
3038         //
3039         // If the link enables were not already overridden by regkey, then
3040         // apply the ROM link enables
3041         //
3042         if (link_enable_mask == NV_U64_MAX)
3043         {
3044             for (idx_link = 0; idx_link < nvswitch_get_num_links(device); idx_link++)
3045             {
3046                 if ((device->firmware.nvlink.link_enable_mask & NVBIT64(idx_link)) == 0)
3047                 {
3048                     device->link[idx_link].valid = NV_FALSE;
3049                 }
3050             }
3051         }
3052     }
3053 }
3054 
3055 void
nvswitch_init_npg_multicast_lr10(nvswitch_device * device)3056 nvswitch_init_npg_multicast_lr10
3057 (
3058     nvswitch_device *device
3059 )
3060 {
3061     NvU32 idx_npg;
3062     NvU32 idx_nport;
3063     NvU32 nport_mask;
3064 
3065     //
3066     // Walk the NPGs and build the mask of extant NPORTs
3067     //
3068     for (idx_npg = 0; idx_npg < NVSWITCH_ENG_COUNT(device, NPG, ); idx_npg++)
3069     {
3070         if (NVSWITCH_ENG_IS_VALID(device, NPG, idx_npg))
3071         {
3072             nport_mask = 0;
3073             for (idx_nport = 0; idx_nport < NVSWITCH_NPORT_PER_NPG; idx_nport++)
3074             {
3075                 nport_mask |=
3076                     (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_npg*NVSWITCH_NPORT_PER_NPG + idx_nport) ?
3077                     NVBIT(idx_nport) : 0x0);
3078             }
3079 
3080             NVSWITCH_NPG_WR32_LR10(device, idx_npg,
3081                 _NPG, _CTRL_PRI_MULTICAST,
3082                 DRF_NUM(_NPG, _CTRL_PRI_MULTICAST, _NPORT_ENABLE, nport_mask) |
3083                 DRF_DEF(_NPG, _CTRL_PRI_MULTICAST, _READ_MODE, _AND_ALL_BUSSES));
3084 
3085             NVSWITCH_NPGPERF_WR32_LR10(device, idx_npg,
3086                 _NPGPERF, _CTRL_PRI_MULTICAST,
3087                 DRF_NUM(_NPGPERF, _CTRL_PRI_MULTICAST, _NPORT_ENABLE, nport_mask) |
3088                 DRF_DEF(_NPGPERF, _CTRL_PRI_MULTICAST, _READ_MODE, _AND_ALL_BUSSES));
3089         }
3090     }
3091 }
3092 
//
// @brief Zero-initialize the NPORT RAMs (tagpools, link table, remap,
//        RID and RLAN tables) using the hardware-init bits, polling every
//        present NPORT until completion.
//
// @param[in] device    a reference to the device
//
// @returns NVL_SUCCESS, or -NVL_ERR_INVALID_STATE if some NPORT did not
//          report completion within the 25ms window (deliberately falls
//          through to the crumbstore programming below -- see bug note).
//
static NvlStatus
nvswitch_clear_nport_rams_lr10
(
    nvswitch_device *device
)
{
    NvU32 idx_nport;
    NvU64 nport_mask = 0;       // NPORTs still pending completion
    NvU32 zero_init_mask;
    NvU32 val;
    NVSWITCH_TIMEOUT timeout;
    NvBool           keepPolling;
    NvlStatus retval = NVL_SUCCESS;

    // Build the mask of available NPORTs
    for (idx_nport = 0; idx_nport < NVSWITCH_ENG_COUNT(device, NPORT, ); idx_nport++)
    {
        if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport))
        {
            nport_mask |= NVBIT64(idx_nport);
        }
    }

    // Start the HW zero init
    zero_init_mask =
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_1, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_2, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_3, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_4, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_5, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_6, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _LINKTABLEINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _REMAPTABINIT,  _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _RIDTABINIT,    _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _RLANTABINIT,   _HWINIT);

    // Kick off initialization on all NPORTs at once via broadcast.
    NVSWITCH_BCAST_WR32_LR10(device, NPORT, _NPORT, _INITIALIZATION,
        zero_init_mask);

    nvswitch_timeout_create(25*NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);

    do
    {
        keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;

        // Check each enabled NPORT that is still pending until all are done
        for (idx_nport = 0; idx_nport < NVSWITCH_ENG_COUNT(device, NPORT, ); idx_nport++)
        {
            if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport) && (nport_mask & NVBIT64(idx_nport)))
            {
                // An NPORT is done when _INITIALIZATION reads back equal to
                // the full requested init mask.
                val = NVSWITCH_ENG_RD32_LR10(device, NPORT, idx_nport, _NPORT, _INITIALIZATION);
                if (val == zero_init_mask)
                {
                    nport_mask &= ~NVBIT64(idx_nport);
                }
            }
        }

        if (nport_mask == 0)
        {
            break;
        }

        nvswitch_os_sleep(1);
    }
    while (keepPolling);

    if (nport_mask != 0)
    {
        NVSWITCH_PRINT(device, WARN,
            "%s: Timeout waiting for NV_NPORT_INITIALIZATION (0x%llx)\n",
            __FUNCTION__, nport_mask);
        // Bug 2974064: Review this timeout handling (fall through)
        retval = -NVL_ERR_INVALID_STATE;
    }

    //bug 2737147 requires SW To init this crumbstore setting for LR10
    val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0)             |
          DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CRUMBSTORE_RAM) |
          DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 0)        |
          DRF_DEF(_TSTATE, _RAM_ADDRESS, _VC, _VC5_TRANSDONE);

    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _RAM_ADDRESS, val);

    return retval;
}
3180 
3181 static void
_nvswitch_init_nport_ecc_control_lr10(nvswitch_device * device)3182 _nvswitch_init_nport_ecc_control_lr10
3183 (
3184     nvswitch_device *device
3185 )
3186 {
3187     // Set ingress ECC error limits
3188     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER,
3189         DRF_NUM(_INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3190     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT, 1);
3191 
3192     // Set egress ECC error limits
3193     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER,
3194         DRF_NUM(_EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3195     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT, 1);
3196 
3197     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER,
3198         DRF_NUM(_EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3199     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT, 1);
3200 
3201     // Set route ECC error limits
3202     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER,
3203         DRF_NUM(_ROUTE, _ERR_NVS_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3204     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER_LIMIT, 1);
3205 
3206     // Set tstate ECC error limits
3207     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER,
3208         DRF_NUM(_TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3209     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1);
3210 
3211     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER,
3212         DRF_NUM(_TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3213     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT, 1);
3214 
3215     // Set sourcetrack ECC error limits to _PROD value
3216     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
3217         DRF_NUM(_SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3218     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1);
3219 
3220     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
3221         DRF_NUM(_SOURCETRACK, _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
3222     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1);
3223 
3224     // Enable ECC/parity
3225     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_ECC_CTRL,
3226         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _NCISOC_HDR_ECC_ENABLE, __PROD) |
3227         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, __PROD) |
3228         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _REMAPTAB_ECC_ENABLE, __PROD) |
3229         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _RIDTAB_ECC_ENABLE, __PROD) |
3230         DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _RLANTAB_ECC_ENABLE, __PROD));
3231 
3232     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_ECC_CTRL,
3233         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NXBAR_ECC_ENABLE, __PROD) |
3234         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NXBAR_PARITY_ENABLE, __PROD) |
3235         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _RAM_OUT_ECC_ENABLE, __PROD) |
3236         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NCISOC_ECC_ENABLE, __PROD) |
3237         DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, __PROD));
3238 
3239     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_ECC_CTRL,
3240         DRF_DEF(_ROUTE, _ERR_ECC_CTRL, _GLT_ECC_ENABLE, __PROD) |
3241         DRF_DEF(_ROUTE, _ERR_ECC_CTRL, _NVS_ECC_ENABLE, __PROD));
3242 
3243     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_ECC_CTRL,
3244         DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _CRUMBSTORE_ECC_ENABLE, __PROD) |
3245         DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _TAGPOOL_ECC_ENABLE, __PROD) |
3246         DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _TD_TID_ECC_ENABLE, _DISABLE));
3247 
3248     NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_ECC_CTRL,
3249         DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN0_CRUMBSTORE_ECC_ENABLE, __PROD) |
3250         DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN0_TD_CRUMBSTORE_ECC_ENABLE, _DISABLE) |
3251         DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN1_CRUMBSTORE_ECC_ENABLE, __PROD));
3252 }
3253 
3254 static void
_nvswitch_init_cmd_routing(nvswitch_device * device)3255 _nvswitch_init_cmd_routing
3256 (
3257     nvswitch_device *device
3258 )
3259 {
3260     NvU32 val;
3261 
3262     //Set Hash policy for the requests.
3263     val = DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN1, _SPRAY) |
3264           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN2, _SPRAY) |
3265           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN4, _SPRAY) |
3266           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN7, _SPRAY);
3267     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _CMD_ROUTE_TABLE0, val);
3268 
3269     // Set Random policy for reponses.
3270     val = DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE2, _RFUN16, _RANDOM) |
3271           DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE2, _RFUN17, _RANDOM);
3272     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _CMD_ROUTE_TABLE2, val);
3273 }
3274 
3275 static NvlStatus
_nvswitch_init_portstat_counters(nvswitch_device * device)3276 _nvswitch_init_portstat_counters
3277 (
3278     nvswitch_device *device
3279 )
3280 {
3281     NvlStatus retval;
3282     NvU32 idx_channel;
3283     NVSWITCH_SET_LATENCY_BINS default_latency_bins;
3284     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
3285 
3286     chip_device->latency_stats = nvswitch_os_malloc(sizeof(NVSWITCH_LATENCY_STATS_LR10));
3287     if (chip_device->latency_stats == NULL)
3288     {
3289         NVSWITCH_PRINT(device, ERROR, "%s: Failed allocate memory for latency stats\n",
3290             __FUNCTION__);
3291         return -NVL_NO_MEM;
3292     }
3293 
3294     nvswitch_os_memset(chip_device->latency_stats, 0, sizeof(NVSWITCH_LATENCY_STATS_LR10));
3295 
3296     //
3297     // These bin thresholds are values provided by Arch based off
3298     // switch latency expectations.
3299     //
3300     for (idx_channel=0; idx_channel < NVSWITCH_NUM_VCS_LR10; idx_channel++)
3301     {
3302         default_latency_bins.bin[idx_channel].lowThreshold = 120;    // 120ns
3303         default_latency_bins.bin[idx_channel].medThreshold = 200;    // 200ns
3304         default_latency_bins.bin[idx_channel].hiThreshold  = 1000;   // 1us
3305     }
3306 
3307     chip_device->latency_stats->sample_interval_msec = 3000; // 3 second sample interval
3308 
3309     retval = nvswitch_ctrl_set_latency_bins(device, &default_latency_bins);
3310     if (retval != NVL_SUCCESS)
3311     {
3312         NVSWITCH_PRINT(device, ERROR, "%s: Failed to set latency bins\n",
3313             __FUNCTION__);
3314         NVSWITCH_ASSERT(0);
3315         return retval;
3316     }
3317 
3318     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_CONTROL,
3319         DRF_DEF(_NPORT, _PORTSTAT_CONTROL, _SWEEPMODE, _SWONDEMAND) |
3320         DRF_DEF(_NPORT, _PORTSTAT_CONTROL, _RANGESELECT, _BITS13TO0));
3321 
3322      NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SOURCE_FILTER_0,
3323          DRF_NUM(_NPORT, _PORTSTAT_SOURCE_FILTER_0, _SRCFILTERBIT, 0xFFFFFFFF));
3324 
3325     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SOURCE_FILTER_1,
3326         DRF_NUM(_NPORT, _PORTSTAT_SOURCE_FILTER_1, _SRCFILTERBIT, 0xF));
3327 
3328     // Set window limit to the maximum value
3329     NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_WINDOW_LIMIT, 0xffffffff);
3330 
3331      NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _GLBLLATENCYTIMERCTRL,
3332          DRF_DEF(_NVLSAW, _GLBLLATENCYTIMERCTRL, _ENABLE, _ENABLE));
3333 
3334      NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL,
3335          DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) |
3336          DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE));
3337 
3338      return NVL_SUCCESS;
3339 }
3340 
3341 NvlStatus
nvswitch_init_nxbar_lr10(nvswitch_device * device)3342 nvswitch_init_nxbar_lr10
3343 (
3344     nvswitch_device *device
3345 )
3346 {
3347     NvU32 tileout;
3348 
3349     // Setting this bit will send error detection info to NPG.
3350     NVSWITCH_BCAST_WR32_LR10(device, TILE, _NXBAR, _TILE_ERR_CYA,
3351         DRF_DEF(_NXBAR, _TILE_ERR_CYA, _SRCID_UPDATE_AT_EGRESS_CTRL, __PROD));
3352 
3353     for (tileout = 0; tileout < NUM_NXBAR_TILEOUTS_PER_TC_LR10; tileout++)
3354     {
3355         NVSWITCH_BCAST_WR32_LR10(device, NXBAR, _NXBAR, _TC_TILEOUT_ERR_CYA(tileout),
3356             DRF_DEF(_NXBAR, _TC_TILEOUT0_ERR_CYA, _SRCID_UPDATE_AT_EGRESS_CTRL, __PROD));
3357     }
3358 
3359     // Enable idle-based clk gating and setup delay count.
3360     NVSWITCH_BCAST_WR32_LR10(device, TILE, _NXBAR, _TILE_PRI_NXBAR_TILE_CG,
3361         DRF_DEF(_NXBAR, _TILE_PRI_NXBAR_TILE_CG, _IDLE_CG_EN, __PROD) |
3362         DRF_DEF(_NXBAR, _TILE_PRI_NXBAR_TILE_CG, _IDLE_CG_DLY_CNT, __PROD));
3363 
3364     NVSWITCH_BCAST_WR32_LR10(device, NXBAR, _NXBAR, _TC_PRI_NXBAR_TC_CG,
3365         DRF_DEF(_NXBAR, _TC_PRI_NXBAR_TC_CG, _IDLE_CG_EN, __PROD) |
3366         DRF_DEF(_NXBAR, _TC_PRI_NXBAR_TC_CG, _IDLE_CG_DLY_CNT, __PROD));
3367 
3368     return NVL_SUCCESS;
3369 }
3370 
//
// @brief Per-NPORT initialization: ECC controls, route/egress controls,
//        regkey-configurable ATO/STO timeouts, and workarounds.
//
// Current settings are read from the first valid NPORT, modified, and
// broadcast to all NPORTs.
//
// @param[in] device    a reference to the device
//
// @returns NVL_SUCCESS, or -NVL_ERR_INVALID_STATE when no NPORT exists
//
NvlStatus
nvswitch_init_nport_lr10
(
    nvswitch_device *device
)
{
    NvU32 data32, timeout;
    NvU32 idx_nport;
    NvU32 num_nports;

    num_nports = NVSWITCH_ENG_COUNT(device, NPORT, );

    for (idx_nport = 0; idx_nport < num_nports; idx_nport++)
    {
        // Find the first valid nport
        if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport))
        {
            break;
        }
    }

    // There were no valid nports
    if (idx_nport == num_nports)
    {
        NVSWITCH_PRINT(device, ERROR, "%s: No valid nports found!\n", __FUNCTION__);
        return -NVL_ERR_INVALID_STATE;
    }

    _nvswitch_init_nport_ecc_control_lr10(device);

    // Read-modify-broadcast: unreachable-request response enable.
    data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _ROUTE, _ROUTE_CONTROL);
    data32 = FLD_SET_DRF(_ROUTE, _ROUTE_CONTROL, _URRESPENB, __PROD, data32);
    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _ROUTE_CONTROL, data32);

    // Egress destination-ID checking and credit timeout (CTO).
    data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _EGRESS, _CTRL);
    data32 = FLD_SET_DRF(_EGRESS, _CTRL, _DESTINATIONIDCHECKENB, __PROD, data32);
    data32 = FLD_SET_DRF(_EGRESS, _CTRL, _CTO_ENB, __PROD, data32);
    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _CTRL, data32);

    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _CTO_TIMER_LIMIT,
        DRF_DEF(_EGRESS, _CTO_TIMER_LIMIT, _LIMIT, __PROD));

    // ATO (access timeout): regkey can disable it or override the timeout.
    if (DRF_VAL(_SWITCH_REGKEY, _ATO_CONTROL, _DISABLE, device->regkeys.ato_control) ==
        NV_SWITCH_REGKEY_ATO_CONTROL_DISABLE_TRUE)
    {
        // ATO Disable
        data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _TAGSTATECONTROL);
        data32 = FLD_SET_DRF(_TSTATE, _TAGSTATECONTROL, _ATO_ENB, _OFF, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _TAGSTATECONTROL, data32);
    }
    else
    {
        // ATO Enable
        data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _TAGSTATECONTROL);
        data32 = FLD_SET_DRF(_TSTATE, _TAGSTATECONTROL, _ATO_ENB, _ON, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _TAGSTATECONTROL, data32);

        // ATO Timeout value: regkey override, otherwise the __PROD default.
        timeout = DRF_VAL(_SWITCH_REGKEY, _ATO_CONTROL, _TIMEOUT, device->regkeys.ato_control);
        if (timeout != NV_SWITCH_REGKEY_ATO_CONTROL_TIMEOUT_DEFAULT)
        {
            NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ATO_TIMER_LIMIT,
                DRF_NUM(_TSTATE, _ATO_TIMER_LIMIT, _LIMIT, timeout));
        }
        else
        {
            NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ATO_TIMER_LIMIT,
                DRF_DEF(_TSTATE, _ATO_TIMER_LIMIT, _LIMIT, __PROD));
        }
    }

    // STO (sourcetrack timeout): same disable/override scheme as ATO.
    if (DRF_VAL(_SWITCH_REGKEY, _STO_CONTROL, _DISABLE, device->regkeys.sto_control) ==
        NV_SWITCH_REGKEY_STO_CONTROL_DISABLE_TRUE)
    {
        // STO Disable
        data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _SOURCETRACK, _CTRL);
        data32 = FLD_SET_DRF(_SOURCETRACK, _CTRL, _STO_ENB, _OFF, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _CTRL, data32);
    }
    else
    {
        // STO Enable
        data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _SOURCETRACK, _CTRL);
        data32 = FLD_SET_DRF(_SOURCETRACK, _CTRL, _STO_ENB, _ON, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _CTRL, data32);

        // STO Timeout value
        timeout = DRF_VAL(_SWITCH_REGKEY, _STO_CONTROL, _TIMEOUT, device->regkeys.sto_control);
        if (timeout != NV_SWITCH_REGKEY_STO_CONTROL_TIMEOUT_DEFAULT)
        {
            NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _MULTISEC_TIMER0,
                DRF_NUM(_SOURCETRACK, _MULTISEC_TIMER0, _TIMERVAL0, timeout));
        }
        else
        {
            NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _MULTISEC_TIMER0,
                DRF_DEF(_SOURCETRACK, _MULTISEC_TIMER0, _TIMERVAL0, __PROD));
        }
    }

    //
    // WAR for bug 200606509
    // Disable CAM for entry 0 to prevent false ATO trigger
    //
    // NOTE(review): the value read here is immediately overwritten by the
    // DRF_NUM below; presumably the read is kept for its side effect or
    // as a priv-access flush -- confirm before removing.
    //
    data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _CREQ_CAM_LOCK);
    data32 = DRF_NUM(_TSTATE, _CREQ_CAM_LOCK, _ON, 0x1);
    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _CREQ_CAM_LOCK, data32);

    //
    // WAR for bug 3115824
    // Clear CONTAIN_AND_DRAIN during init for links in reset.
    // Since SBR does not clear CONTAIN_AND_DRAIN, this will clear the bit
    // when the driver is reloaded after an SBR. If the driver has been reloaded
    // without an SBR, then CONTAIN_AND_DRAIN will be re-triggered.
    //
    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _CONTAIN_AND_DRAIN,
        DRF_DEF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE));

    return NVL_SUCCESS;
}
3491 
3492 void *
nvswitch_alloc_chipdevice_lr10(nvswitch_device * device)3493 nvswitch_alloc_chipdevice_lr10
3494 (
3495     nvswitch_device *device
3496 )
3497 {
3498     void *chip_device;
3499 
3500     chip_device = nvswitch_os_malloc(sizeof(lr10_device));
3501     if (NULL != chip_device)
3502     {
3503         nvswitch_os_memset(chip_device, 0, sizeof(lr10_device));
3504     }
3505 
3506     device->chip_id = NV_PSMC_BOOT_42_CHIP_ID_LR10;
3507     return(chip_device);
3508 }
3509 
3510 static NvlStatus
nvswitch_initialize_pmgr_lr10(nvswitch_device * device)3511 nvswitch_initialize_pmgr_lr10
3512 (
3513     nvswitch_device *device
3514 )
3515 {
3516     nvswitch_init_pmgr_lr10(device);
3517     nvswitch_init_pmgr_devices_lr10(device);
3518 
3519     return NVL_SUCCESS;
3520 }
3521 
3522 static NvlStatus
nvswitch_initialize_route_lr10(nvswitch_device * device)3523 nvswitch_initialize_route_lr10
3524 (
3525     nvswitch_device *device
3526 )
3527 {
3528     NvlStatus retval;
3529 
3530     retval = _nvswitch_init_ganged_link_routing(device);
3531     if (NVL_SUCCESS != retval)
3532     {
3533         NVSWITCH_PRINT(device, ERROR,
3534             "%s: Failed to initialize GLT\n",
3535             __FUNCTION__);
3536         goto nvswitch_initialize_route_exit;
3537     }
3538 
3539     _nvswitch_init_cmd_routing(device);
3540 
3541     // Initialize Portstat Counters
3542     retval = _nvswitch_init_portstat_counters(device);
3543     if (NVL_SUCCESS != retval)
3544     {
3545         NVSWITCH_PRINT(device, ERROR,
3546             "%s: Failed to initialize portstat counters\n",
3547             __FUNCTION__);
3548         goto nvswitch_initialize_route_exit;
3549     }
3550 
3551 nvswitch_initialize_route_exit:
3552     return retval;
3553 }
3554 
3555 
//
// @brief Enumerate and start the PRIV ring, verifying connectivity and
//        that no ring error interrupts are pending. Retries up to 3 times
//        (see bug note below).
//
// @param[in] device    a reference to the device
//
// @returns NVL_SUCCESS, or -NVL_INITIALIZATION_TOTAL_FAILURE if the ring
//          could not be enumerated in any attempt
//
NvlStatus
nvswitch_pri_ring_init_lr10
(
    nvswitch_device *device
)
{
    NvU32 i;
    NvU32 value;
    NvBool enumerated = NV_FALSE;
    NvlStatus retval = NVL_SUCCESS;

    //
    // Sometimes on RTL simulation we see the priv ring initialization fail.
    // Retry up to 3 times until this issue is root caused. Bug 1826216.
    //
    for (i = 0; !enumerated && (i < 3); i++)
    {
        // Kick off ring enumeration and wait for the command to complete.
        value = DRF_DEF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _ENUMERATE_AND_START_RING);
        retval = nvswitch_ring_master_cmd_lr10(device, value);
        if (retval != NVL_SUCCESS)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: PRIV ring enumeration failed\n",
                __FUNCTION__);
            continue;
        }

        // The start results must report passing connectivity.
        value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_START_RESULTS);
        if (!FLD_TEST_DRF(_PPRIV_MASTER, _RING_START_RESULTS, _CONNECTIVITY, _PASS, value))
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: PRIV ring connectivity failed\n",
                __FUNCTION__);
            continue;
        }

        // Any pending ring interrupt fails this attempt; log fault bits,
        // ack the interrupt, and retry.
        value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_INTERRUPT_STATUS0);
        if (value)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0 = %x\n",
                __FUNCTION__, value);

            if ((!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
                    _RING_START_CONN_FAULT, 0, value)) ||
                (!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
                    _DISCONNECT_FAULT, 0, value))      ||
                (!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0,
                    _OVERFLOW_FAULT, 0, value)))
            {
                NVSWITCH_PRINT(device, ERROR,
                    "%s: PRIV ring error interrupt\n",
                    __FUNCTION__);
            }

            // Best-effort ack; the retry loop handles any failure here.
            (void)nvswitch_ring_master_cmd_lr10(device,
                    DRF_DEF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _ACK_INTERRUPT));

            continue;
        }

        enumerated = NV_TRUE;
    }

    if (!enumerated)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Cannot enumerate PRIV ring!\n",
            __FUNCTION__);
        retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
    }

    return retval;
}
3630 
/*
 * @Brief : Initializes an NvSwitch hardware state
 *
 * @Description : One-time bring-up of the LR10 device.  Allocates the
 *                chip-specific state, runs MMIO engine discovery, then
 *                initializes each hardware block in strict dependency order
 *                (SOE, PMGR, PLLs, IP wrappers, NPORT/NXBAR, MINION, SPI,
 *                SMBPBI, interrupt tree, thermal).  On any failure the
 *                partially-initialized state is destroyed before returning.
 *
 * @param[in] device        a reference to the device to initialize
 *
 * @returns                 NVL_SUCCESS if the action succeeded
 *                          -NVL_BAD_ARGS if bad arguments provided
 *                          -NVL_PCI_ERROR if bar info unable to be retrieved
 */
NvlStatus
nvswitch_initialize_device_state_lr10
(
    nvswitch_device *device
)
{
    NvlStatus retval = NVL_SUCCESS;

    // alloc chip-specific device structure
    device->chip_device = nvswitch_alloc_chipdevice(device);
    if (NULL == device->chip_device)
    {
        NVSWITCH_PRINT(device, ERROR,
            "nvswitch_os_malloc during chip_device creation failed!\n");
        retval = -NVL_NO_MEM;
        goto nvswitch_initialize_device_state_exit;
    }

    // Verify basic register IO works before touching anything else.
    retval = nvswitch_check_io_sanity(device);
    if (NVL_SUCCESS != retval)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: IO sanity test failed\n",
            __FUNCTION__);
        goto nvswitch_initialize_device_state_exit;
    }

    //
    // Walk the PTOP discovery tables to locate all on-chip engines.
    // Everything below depends on the engine descriptors produced here.
    //
    NVSWITCH_PRINT(device, SETUP,
        "%s: MMIO discovery\n",
        __FUNCTION__);
    retval = nvswitch_device_discovery(device, NV_SWPTOP_TABLE_BASE_ADDRESS_OFFSET);
    if (NVL_SUCCESS != retval)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Engine discovery failed\n",
            __FUNCTION__);
        goto nvswitch_initialize_device_state_exit;
    }

    nvswitch_filter_discovery(device);

    retval = nvswitch_process_discovery(device);
    if (NVL_SUCCESS != retval)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Discovery processing failed\n",
            __FUNCTION__);
        goto nvswitch_initialize_device_state_exit;
    }

    // now that we have completed discovery, perform initialization steps that
    // depend on engineDescriptors being initialized
    //
    // Temporary location, really needs to be done somewhere common to all flcnables
    if (nvswitch_is_soe_supported(device))
    {
        flcnablePostDiscoveryInit(device, device->pSoe);
    }
    else
    {
        NVSWITCH_PRINT(device, INFO, "%s: Skipping SOE post discovery init.\n",
            __FUNCTION__);
    }

    // Make sure interrupts are disabled before we enable interrupts with the OS.
    nvswitch_lib_disable_interrupts(device);

    // The PRIV ring must be enumerated before engine registers are reachable.
    retval = nvswitch_pri_ring_init(device);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR, "%s: PRI init failed\n", __FUNCTION__);
        goto nvswitch_initialize_device_state_exit;
    }

    // Log the regkey-selected link mask, clamped to the links this chip has.
    NVSWITCH_PRINT(device, SETUP,
        "%s: Enabled links: 0x%llx\n",
        __FUNCTION__,
        ((NvU64)device->regkeys.link_enable_mask2 << 32 |
        (NvU64)device->regkeys.link_enable_mask) &
        ((~0ULL) >> (64 - NVSWITCH_LINK_COUNT(device))));

    // Detect TNVL mode
    nvswitch_detect_tnvl_mode(device);

    if (nvswitch_is_soe_supported(device))
    {
        retval = nvswitch_init_soe(device);
        if (NVL_SUCCESS != retval)
        {
            NVSWITCH_PRINT(device, ERROR, "%s: Init SOE failed\n",
                __FUNCTION__);
            goto nvswitch_initialize_device_state_exit;
        }
    }
    else
    {
        NVSWITCH_PRINT(device, INFO, "%s: Skipping SOE init.\n",
            __FUNCTION__);
    }

    // Read ROM configuration
    nvswitch_read_rom_tables(device, &device->firmware);
    _nvswitch_process_firmware_info_lr10(device, &device->firmware);

    // Init PMGR info
    retval = nvswitch_initialize_pmgr(device);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: PMGR init failed\n", __FUNCTION__);
        retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
        goto nvswitch_initialize_device_state_exit;
    }

    retval = nvswitch_init_pll_config(device);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: failed\n", __FUNCTION__);
        retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
        goto nvswitch_initialize_device_state_exit;
    }

    //
    // PLL init should be done *first* before other hardware init
    //
    retval = nvswitch_init_pll(device);
    if (NVL_SUCCESS != retval)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: PLL init failed\n",
            __FUNCTION__);
        goto nvswitch_initialize_device_state_exit;
    }

    //
    // Now that software knows the devices and addresses, it must take all
    // the wrapper modules out of reset.  It does this by writing to the
    // PMC module enable registers.
    //

    // Init IP wrappers
//    _nvswitch_init_mc_enable_lr10(device);
    retval = nvswitch_initialize_ip_wrappers(device);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: init failed\n", __FUNCTION__);
        retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
        goto nvswitch_initialize_device_state_exit;
    }

    nvswitch_init_warm_reset(device);
    nvswitch_init_npg_multicast(device);
    retval = nvswitch_clear_nport_rams(device);
    if (NVL_SUCCESS != retval)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: NPORT RAM clear failed\n",
            __FUNCTION__);
        goto nvswitch_initialize_device_state_exit;
    }

    retval = nvswitch_init_nport(device);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Init NPORTs failed\n",
            __FUNCTION__);
        goto nvswitch_initialize_device_state_exit;
    }

    retval = nvswitch_init_nxbar(device);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Init NXBARs failed\n",
            __FUNCTION__);
        goto nvswitch_initialize_device_state_exit;
    }

    // MINION may be disabled via regkey for debug/bring-up scenarios.
    if (device->regkeys.minion_disable != NV_SWITCH_REGKEY_MINION_DISABLE_YES)
    {
        NVSWITCH_PRINT(device, WARN, "%s: Entering init minion\n", __FUNCTION__);

        retval = nvswitch_init_minion(device);
        if (NVL_SUCCESS != retval)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Init MINIONs failed\n",
                __FUNCTION__);
            goto nvswitch_initialize_device_state_exit;
        }
    }
    else
    {
        NVSWITCH_PRINT(device, INFO, "MINION is disabled via regkey.\n");

        NVSWITCH_PRINT(device, INFO, "%s: Skipping MINION init\n",
            __FUNCTION__);
    }

    _nvswitch_setup_chiplib_forced_config_lr10(device);

    // Init route
    retval = nvswitch_initialize_route(device);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: route init failed\n", __FUNCTION__);
        retval = -NVL_INITIALIZATION_TOTAL_FAILURE;
        goto nvswitch_initialize_device_state_exit;
    }

    nvswitch_init_clock_gating(device);

    // Initialize SPI
    if (nvswitch_is_spi_supported(device))
    {
        retval = nvswitch_spi_init(device);
        if (NVL_SUCCESS != retval)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: SPI init failed!, rc: %d\n",
                __FUNCTION__, retval);
            goto nvswitch_initialize_device_state_exit;
        }
    }
    else
    {
        NVSWITCH_PRINT(device, WARN,
            "%s: Skipping SPI init.\n",
            __FUNCTION__);
    }

    // Initialize SMBPBI
    if (nvswitch_is_smbpbi_supported(device))
    {
        retval = nvswitch_smbpbi_init(device);
        if (NVL_SUCCESS != retval)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: SMBPBI init failed!, rc: %d\n",
                __FUNCTION__, retval);
            goto nvswitch_initialize_device_state_exit;
        }
    }
    else
    {
        NVSWITCH_PRINT(device, WARN,
            "%s: Skipping SMBPBI init.\n",
            __FUNCTION__);
    }

    nvswitch_initialize_interrupt_tree(device);

    // Initialize external thermal sensor.  Non-fatal: failure is logged
    // but does not abort device initialization.
    retval = nvswitch_init_thermal(device);
    if (NVL_SUCCESS != retval)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: External Thermal init failed\n",
            __FUNCTION__);
    }

    return NVL_SUCCESS;

nvswitch_initialize_device_state_exit:
    // Unified error path: tear down whatever was brought up above.
    nvswitch_destroy_device_state(device);

    return retval;
}
3914 
3915 /*
3916  * @Brief : Destroys an NvSwitch hardware state
3917  *
3918  * @Description :
3919  *
3920  * @param[in] device        a reference to the device to initialize
3921  */
3922 void
nvswitch_destroy_device_state_lr10(nvswitch_device * device)3923 nvswitch_destroy_device_state_lr10
3924 (
3925     nvswitch_device *device
3926 )
3927 {
3928     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
3929 
3930     if (nvswitch_is_soe_supported(device))
3931     {
3932         nvswitch_soe_unregister_events(device);
3933     }
3934 
3935     if (chip_device != NULL)
3936     {
3937         if ((chip_device->latency_stats) != NULL)
3938         {
3939             nvswitch_os_free(chip_device->latency_stats);
3940         }
3941 
3942         if ((chip_device->ganged_link_table) != NULL)
3943         {
3944             nvswitch_os_free(chip_device->ganged_link_table);
3945         }
3946 
3947         nvswitch_free_chipdevice(device);
3948     }
3949 
3950     nvswitch_i2c_destroy(device);
3951 
3952     return;
3953 }
3954 
3955 static void
_nvswitch_set_nvlink_caps_lr10(NvU32 * pCaps)3956 _nvswitch_set_nvlink_caps_lr10
3957 (
3958     NvU32 *pCaps
3959 )
3960 {
3961     NvU8 tempCaps[NVSWITCH_NVLINK_CAPS_TBL_SIZE];
3962 
3963     nvswitch_os_memset(tempCaps, 0, sizeof(tempCaps));
3964 
3965     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _VALID);
3966     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SUPPORTED);
3967     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _P2P_SUPPORTED);
3968     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _P2P_ATOMICS);
3969 
3970     // Assume IBM P9 for PPC -- TODO Xavier support.
3971 #if defined(NVCPU_PPC64LE)
3972     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SYSMEM_ACCESS);
3973     NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SYSMEM_ATOMICS);
3974 #endif
3975 
3976     nvswitch_os_memcpy(pCaps, tempCaps, sizeof(tempCaps));
3977 }
3978 
3979 /*
3980  * @brief Determines if a link's lanes are reversed
3981  *
3982  * @param[in] device    a reference to the device to query
3983  * @param[in] linkId    Target link ID
3984  *
3985  * @return NV_TRUE if a link's lanes are reversed
3986  */
3987 NvBool
nvswitch_link_lane_reversed_lr10(nvswitch_device * device,NvU32 linkId)3988 nvswitch_link_lane_reversed_lr10
3989 (
3990     nvswitch_device *device,
3991     NvU32            linkId
3992 )
3993 {
3994     NvU32 regData;
3995     nvlink_link *link;
3996 
3997     link = nvswitch_get_link(device, linkId);
3998     if ((link == NULL) || nvswitch_is_link_in_reset(device, link))
3999     {
4000         return NV_FALSE;
4001     }
4002 
4003     regData = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_RX, _CONFIG_RX);
4004 
4005     // HW may reverse the lane ordering or it may be overridden by SW.
4006     if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _REVERSAL_OVERRIDE, _ON, regData))
4007     {
4008         // Overridden
4009         if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _LANE_REVERSE, _ON, regData))
4010         {
4011             return NV_TRUE;
4012         }
4013         else
4014         {
4015             return NV_FALSE;
4016         }
4017     }
4018     else
4019     {
4020         // Sensed in HW
4021         if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _HW_LANE_REVERSE, _ON, regData))
4022         {
4023             return NV_TRUE;
4024         }
4025         else
4026         {
4027             return NV_FALSE;
4028         }
4029     }
4030 
4031     return NV_FALSE;
4032 }
4033 
//
// Populate NVSWITCH_GET_NVLINK_STATUS_PARAMS for every enabled link:
// per-link caps, link/sublink state, lane reversal, remote-end connection
// info, local PCI identity, and clock data.
//
// NOTE: when FM does not externally manage training, the remote-connection
// query below can actively train links, so it must run before link state
// is sampled.
//
NvlStatus
nvswitch_ctrl_get_nvlink_status_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_NVLINK_STATUS_PARAMS *ret
)
{
    NvlStatus retval = NVL_SUCCESS;
    nvlink_link *link;
    NvU8 i;
    NvU32 linkState, txSublinkStatus, rxSublinkStatus;
    nvlink_conn_info conn_info = {0};
    NvU64 enabledLinkMask;
    NvU32 nvlink_caps_version;

    enabledLinkMask = nvswitch_get_enabled_link_mask(device);
    ret->enabledLinkMask = enabledLinkMask;

    FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask)
    {
        NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device));

        link = nvswitch_get_link(device, i);

        // Skip links with no valid NVLDL engine or out-of-range indices.
        if ((link == NULL) ||
            !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
            (i >= NVSWITCH_NVLINK_MAX_LINKS))
        {
            continue;
        }

        //
        // Call the core library to get the remote end information. On the first
        // invocation this will also trigger link training, if link-training is
        // not externally managed by FM. Therefore it is necessary that this be
        // before link status on the link is populated since this call will
        // actually change link state.
        //
        if (device->regkeys.external_fabric_mgmt)
        {
            nvlink_lib_get_remote_conn_info(link, &conn_info);
        }
        else
        {
            nvlink_lib_discover_and_get_remote_conn_info(link, &conn_info, NVLINK_STATE_CHANGE_SYNC);
        }

        // Set NVLINK per-link caps
        _nvswitch_set_nvlink_caps_lr10(&ret->linkInfo[i].capsTbl);

        ret->linkInfo[i].phyType = NVSWITCH_NVLINK_STATUS_PHY_NVHS;
        ret->linkInfo[i].subLinkWidth = nvswitch_get_sublink_width(device, link->linkNumber);

        if (!nvswitch_is_link_in_reset(device, link))
        {
            // Link is out of reset: sample live DL state from HW.
            linkState = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_STATE);
            linkState = DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, linkState);

            txSublinkStatus = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _SLSM_STATUS_TX);
            txSublinkStatus = DRF_VAL(_NVLDL_TX, _SLSM_STATUS_TX, _PRIMARY_STATE, txSublinkStatus);

            rxSublinkStatus = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _SLSM_STATUS_RX);
            rxSublinkStatus = DRF_VAL(_NVLDL_RX, _SLSM_STATUS_RX, _PRIMARY_STATE, rxSublinkStatus);

            ret->linkInfo[i].bLaneReversal = nvswitch_link_lane_reversed_lr10(device, i);
        }
        else
        {
            // Link still in reset: report INIT/OFF without touching HW.
            linkState       = NVSWITCH_NVLINK_STATUS_LINK_STATE_INIT;
            txSublinkStatus = NVSWITCH_NVLINK_STATUS_SUBLINK_TX_STATE_OFF;
            rxSublinkStatus = NVSWITCH_NVLINK_STATUS_SUBLINK_RX_STATE_OFF;
        }

        ret->linkInfo[i].linkState       = linkState;
        ret->linkInfo[i].txSublinkStatus = txSublinkStatus;
        ret->linkInfo[i].rxSublinkStatus = rxSublinkStatus;

        // Map the device-wide caps version to per-link NVLink/NCI versions.
        nvlink_caps_version = nvswitch_get_caps_nvlink_version(device);
        if (nvlink_caps_version == NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0)
        {
            ret->linkInfo[i].nvlinkVersion = NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_3_0;
            ret->linkInfo[i].nciVersion = NVSWITCH_NVLINK_STATUS_NCI_VERSION_3_0;
        }
        else if (nvlink_caps_version == NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_4_0)
        {
            ret->linkInfo[i].nvlinkVersion = NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_4_0;
            ret->linkInfo[i].nciVersion = NVSWITCH_NVLINK_STATUS_NCI_VERSION_4_0;
        }
        else
        {
            NVSWITCH_PRINT(device, WARN,
                "%s WARNING: Unknown NVSWITCH_NVLINK_CAPS_NVLINK_VERSION 0x%x\n",
                __FUNCTION__, nvlink_caps_version);
            ret->linkInfo[i].nvlinkVersion = NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_INVALID;
            ret->linkInfo[i].nciVersion = NVSWITCH_NVLINK_STATUS_NCI_VERSION_INVALID;
        }

        ret->linkInfo[i].phyVersion = NVSWITCH_NVLINK_STATUS_NVHS_VERSION_1_0;

        if (conn_info.bConnected)
        {
            // Remote end known: report its link number, PCI identity and SIDs.
            ret->linkInfo[i].connected = NVSWITCH_NVLINK_STATUS_CONNECTED_TRUE;
            ret->linkInfo[i].remoteDeviceLinkNumber = (NvU8)conn_info.linkNumber;

            ret->linkInfo[i].remoteDeviceInfo.domain = conn_info.domain;
            ret->linkInfo[i].remoteDeviceInfo.bus = conn_info.bus;
            ret->linkInfo[i].remoteDeviceInfo.device = conn_info.device;
            ret->linkInfo[i].remoteDeviceInfo.function = conn_info.function;
            ret->linkInfo[i].remoteDeviceInfo.pciDeviceId = conn_info.pciDeviceId;
            ret->linkInfo[i].remoteDeviceInfo.deviceType = conn_info.deviceType;

            ret->linkInfo[i].localLinkSid  = link->localSid;
            ret->linkInfo[i].remoteLinkSid = link->remoteSid;

            // A zero PCI device ID means no PCI identity is available.
            if (0 != conn_info.pciDeviceId)
            {
                ret->linkInfo[i].remoteDeviceInfo.deviceIdFlags =
                    FLD_SET_DRF(SWITCH_NVLINK, _DEVICE_INFO, _DEVICE_ID_FLAGS,
                         _PCI, ret->linkInfo[i].remoteDeviceInfo.deviceIdFlags);
            }

            // Does not use loopback
            ret->linkInfo[i].loopProperty =
                NVSWITCH_NVLINK_STATUS_LOOP_PROPERTY_NONE;
        }
        else
        {
            ret->linkInfo[i].connected =
                NVSWITCH_NVLINK_STATUS_CONNECTED_FALSE;
            ret->linkInfo[i].remoteDeviceInfo.deviceType =
                NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_NONE;
        }

        // Set the device information for the local end of the link
        ret->linkInfo[i].localDeviceInfo.domain = device->nvlink_device->pciInfo.domain;
        ret->linkInfo[i].localDeviceInfo.bus = device->nvlink_device->pciInfo.bus;
        ret->linkInfo[i].localDeviceInfo.device = device->nvlink_device->pciInfo.device;
        ret->linkInfo[i].localDeviceInfo.function = device->nvlink_device->pciInfo.function;
        ret->linkInfo[i].localDeviceInfo.pciDeviceId = 0xdeadbeef; // TODO: placeholder, not a real device ID
        ret->linkInfo[i].localDeviceLinkNumber = i;
        ret->linkInfo[i].laneRxdetStatusMask = device->link[i].lane_rxdet_status_mask;
        ret->linkInfo[i].localDeviceInfo.deviceType =
            NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_SWITCH;

        // Clock data; link clock is derived from the line rate (divide by 32).
        ret->linkInfo[i].nvlinkLineRateMbps = nvswitch_minion_get_line_rate_Mbps_lr10(device, i);
        ret->linkInfo[i].nvlinkLinkDataRateKiBps = nvswitch_minion_get_data_rate_KiBps_lr10(device, i);
        ret->linkInfo[i].nvlinkLinkClockMhz = ret->linkInfo[i].nvlinkLineRateMbps / 32;
        ret->linkInfo[i].nvlinkRefClkSpeedMhz = 156;
        ret->linkInfo[i].nvlinkRefClkType = NVSWITCH_NVLINK_REFCLK_TYPE_NVHS;

    }
    FOR_EACH_INDEX_IN_MASK_END;

//    NVSWITCH_ASSERT(ret->enabledLinkMask == enabledLinkMask);

    return retval;
}
4192 
//
// Gather NVLink counters for the link selected by ret->linkId, for each
// counter class requested in ret->counterMask: TL throughput counters
// (read directly from NVLTLC), and DL error counters (CRC flit/lane,
// replay, recovery) obtained via MINION DL-status reads.  When MINION is
// not initialized the DL counters are reported as 0.
//
NvlStatus
nvswitch_ctrl_get_counters_lr10
(
    nvswitch_device *device,
    NVSWITCH_NVLINK_GET_COUNTERS_PARAMS *ret
)
{
    nvlink_link *link;
    NvU8   i;
    NvU32  counterMask;
    NvU32  data;
    NvU32  val;
    NvU64  tx0TlCount;
    NvU64  tx1TlCount;
    NvU64  rx0TlCount;
    NvU64  rx1TlCount;
    NvU32  laneId;
    NvBool bLaneReversed;
    NvlStatus status;
    NvBool minion_enabled;

    // The per-lane CRC array in the params struct must hold all LR10 lanes.
    ct_assert(NVSWITCH_NUM_LANES_LR10 <= NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE__SIZE);

    link = nvswitch_get_link(device, ret->linkId);
    if ((link == NULL) ||
        !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber))
    {
        return -NVL_BAD_ARGS;
    }

    minion_enabled = nvswitch_is_minion_initialized(device, NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION));

    counterMask = ret->counterMask;

    // Common usage allows one of these to stand for all of them
    if (counterMask & (NVSWITCH_NVLINK_COUNTER_TL_TX0 |
                       NVSWITCH_NVLINK_COUNTER_TL_TX1 |
                       NVSWITCH_NVLINK_COUNTER_TL_RX0 |
                       NVSWITCH_NVLINK_COUNTER_TL_RX1))
    {
        // Each 64-bit TL counter uses bit 63 as an overflow sticky flag;
        // report it separately and strip it from the returned value.
        tx0TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(0)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(0)));
        if (NVBIT64(63) & tx0TlCount)
        {
            ret->bTx0TlCounterOverflow = NV_TRUE;
            tx0TlCount &= ~(NVBIT64(63));
        }

        tx1TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(1)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(1)));
        if (NVBIT64(63) & tx1TlCount)
        {
            ret->bTx1TlCounterOverflow = NV_TRUE;
            tx1TlCount &= ~(NVBIT64(63));
        }

        rx0TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(0)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(0)));
        if (NVBIT64(63) & rx0TlCount)
        {
            ret->bRx0TlCounterOverflow = NV_TRUE;
            rx0TlCount &= ~(NVBIT64(63));
        }

        rx1TlCount = nvswitch_read_64bit_counter(device,
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(1)),
            NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(1)));
        if (NVBIT64(63) & rx1TlCount)
        {
            ret->bRx1TlCounterOverflow = NV_TRUE;
            rx1TlCount &= ~(NVBIT64(63));
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_TX0)] = tx0TlCount;
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_TX1)] = tx1TlCount;
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_RX0)] = rx0TlCount;
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_RX1)] = rx1TlCount;
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_RX01, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _RX01, _FLIT_CRC_ERRORS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT)]
            = data;
    }

    data = 0x0;
    bLaneReversed = nvswitch_link_lane_reversed_lr10(device, link->linkNumber);

    for (laneId = 0; laneId < NVSWITCH_NUM_LANES_LR10; laneId++)
    {
        //
        // HW may reverse the lane ordering or it may be overridden by SW.
        // If so, invert the interpretation of the lane CRC errors.
        //
        i = (NvU8)((bLaneReversed) ? (NVSWITCH_NUM_LANES_LR10 - 1) - laneId : laneId);

        // NOTE(review): the same DB01 status word is re-read on every loop
        // iteration even though laneId does not affect the read — presumably
        // harmless; confirm before hoisting it out of the loop.
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_DB01, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(laneId))
        {
            val = BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(laneId));

            // Select the DB01 field matching the (possibly reversed) lane.
            switch (i)
            {
                case 0:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L0, data);
                    break;
                case 1:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L1, data);
                    break;
                case 2:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L2, data);
                    break;
                case 3:
                    ret->nvlinkCounters[val]
                        = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L3, data);
                    break;
            }
        }
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_TX09, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _TX09, _REPLAY_EVENTS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY)]
            = data;
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_LNK1, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _LNK1, _ERROR_COUNT1_RECOVERY_EVENTS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)]
            = data;
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_REPLAY)
    {
        if (minion_enabled)
        {
            status = nvswitch_minion_get_dl_status(device, link->linkNumber,
                                    NV_NVLSTAT_RX00, 0, &data);
            if (status != NVL_SUCCESS)
            {
                return status;
            }
            data = DRF_VAL(_NVLSTAT, _RX00, _REPLAY_EVENTS_VALUE, data);
        }
        else
        {
            // MINION disabled
            data = 0;
        }

        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_REPLAY)]
            = data;
    }

    // PHY refresh counters are not implemented on LR10; report zero.
    if (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS)
    {
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS)] = 0;
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL)
    {
        ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL)] = 0;
    }

    return NVL_SUCCESS;
}
4427 
4428 static void
nvswitch_ctrl_clear_throughput_counters_lr10(nvswitch_device * device,nvlink_link * link,NvU32 counterMask)4429 nvswitch_ctrl_clear_throughput_counters_lr10
4430 (
4431     nvswitch_device *device,
4432     nvlink_link     *link,
4433     NvU32            counterMask
4434 )
4435 {
4436     NvU32 data;
4437 
4438     // TX
4439     data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL);
4440     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_TX0)
4441     {
4442         data = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETTX0, 0x1, data);
4443     }
4444     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_TX1)
4445     {
4446         data = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETTX1, 0x1, data);
4447     }
4448     NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, data);
4449 
4450     // RX
4451     data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL);
4452     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_RX0)
4453     {
4454         data = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETRX0, 0x1, data);
4455     }
4456     if (counterMask & NVSWITCH_NVLINK_COUNTER_TL_RX1)
4457     {
4458         data = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, _RESETRX1, 0x1, data);
4459     }
4460     NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, data);
4461 }
4462 
//
// Clear the NVLink DL (data link) error counters selected by counterMask.
//
// If the link's MINION instance is initialized, the clear is delegated to
// MINION; otherwise the DL error-count control registers are programmed
// directly.
//
//  device      [in] nvswitch device
//  link        [in] link whose DL error counters are to be cleared
//  counterMask [in] NVSWITCH_NVLINK_COUNTER_DL_* bits selecting counters
//
// Returns NVL_SUCCESS, including when there is nothing to clear.
//
static NvlStatus
nvswitch_ctrl_clear_dl_error_counters_lr10
(
    nvswitch_device *device,
    nvlink_link     *link,
    NvU32            counterMask
)
{
    NvU32           data;

    //
    // Early-out when none of the gated DL counter bits is set.
    // NOTE(review): _DL_RX_ERR_CRC_FLIT is handled below but is absent from
    // this gating mask, so a request for CRC_FLIT alone returns early here --
    // confirm that this is intended.
    //
    if ((!counterMask) ||
        (!(counterMask & (NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7 |
                          NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS |
                          NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY |
                          NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY))))
    {
        NVSWITCH_PRINT(device, INFO,
            "%s: Link%d: No error count clear request, counterMask (0x%x). Returning!\n",
            __FUNCTION__, link->linkNumber, counterMask);
        return NVL_SUCCESS;
    }

    // With Minion initialized, send command to minion
    if (nvswitch_is_minion_initialized(device, NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION)))
    {
        // MINION clears all DL counters for the link, regardless of mask.
        return nvswitch_minion_clear_dl_error_counters_lr10(device, link->linkNumber);
    }

    // With Minion not-initialized, perform with the registers
    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT)
    {
        // Clear the RX flit-CRC count and the error rate counters.
        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL);
        data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_FLIT_CRC, _CLEAR, data);
        data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_RATES, _CLEAR, data);
        NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL, data);
    }

    // Any per-lane CRC or ECC request clears the (shared) lane-CRC counters.
    if (counterMask & (NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7 |
               NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS))
    {
        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL);
        data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_LANE_CRC, _CLEAR, data);
        data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_RATES, _CLEAR, data);
        // ECC counts are cleared only when explicitly requested.
        if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS)
        {
            data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_ECC_COUNTS, _CLEAR, data);
        }
        NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL, data);
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY)
    {
        // Clear the TX replay counter.
        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _ERROR_COUNT_CTRL);
        data = FLD_SET_DRF(_NVLDL_TX, _ERROR_COUNT_CTRL, _CLEAR_REPLAY, _CLEAR, data);
        NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _ERROR_COUNT_CTRL, data);
    }

    if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)
    {
        // Recovery counter lives in the DL TOP register space.
        data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _ERROR_COUNT_CTRL);
        data = FLD_SET_DRF(_NVLDL_TOP, _ERROR_COUNT_CTRL, _CLEAR_RECOVERY, _CLEAR, data);
        NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _ERROR_COUNT_CTRL, data);
    }
    return NVL_SUCCESS;
}
4542 
4543 /*
4544  * CTRL_NVSWITCH_GET_INFO
4545  *
4546  * Query for miscellaneous information analogous to NV2080_CTRL_GPU_INFO
4547  * This provides a single API to query for multiple pieces of miscellaneous
4548  * information via a single call.
4549  *
4550  */
4551 
4552 static NvU32
_nvswitch_get_info_chip_id(nvswitch_device * device)4553 _nvswitch_get_info_chip_id
4554 (
4555     nvswitch_device *device
4556 )
4557 {
4558     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4559 
4560     return (DRF_VAL(_PSMC, _BOOT_42, _CHIP_ID, val));
4561 }
4562 
4563 static NvU32
_nvswitch_get_info_revision_major(nvswitch_device * device)4564 _nvswitch_get_info_revision_major
4565 (
4566     nvswitch_device *device
4567 )
4568 {
4569     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4570 
4571     return (DRF_VAL(_PSMC, _BOOT_42, _MAJOR_REVISION, val));
4572 }
4573 
4574 static NvU32
_nvswitch_get_info_revision_minor(nvswitch_device * device)4575 _nvswitch_get_info_revision_minor
4576 (
4577     nvswitch_device *device
4578 )
4579 {
4580     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4581 
4582     return (DRF_VAL(_PSMC, _BOOT_42, _MINOR_REVISION, val));
4583 }
4584 
4585 static NvU32
_nvswitch_get_info_revision_minor_ext(nvswitch_device * device)4586 _nvswitch_get_info_revision_minor_ext
4587 (
4588     nvswitch_device *device
4589 )
4590 {
4591     NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
4592 
4593     return (DRF_VAL(_PSMC, _BOOT_42, _MINOR_EXTENDED_REVISION, val));
4594 }
4595 
//
// Report whether the InfoROM BBX object is supported: always NV_FALSE
// on LR10.
//
static NvBool
_nvswitch_inforom_bbx_supported
(
    nvswitch_device *device
)
{
    return NV_FALSE;
}
4604 
4605 /*
4606  * CTRL_NVSWITCH_GET_INFO
4607  *
4608  * Query for miscellaneous information analogous to NV2080_CTRL_GPU_INFO
4609  * This provides a single API to query for multiple pieces of miscellaneous
4610  * information via a single call.
4611  *
4612  */
4613 
4614 NvlStatus
nvswitch_ctrl_get_info_lr10(nvswitch_device * device,NVSWITCH_GET_INFO * p)4615 nvswitch_ctrl_get_info_lr10
4616 (
4617     nvswitch_device *device,
4618     NVSWITCH_GET_INFO *p
4619 )
4620 {
4621     NvlStatus retval = NVL_SUCCESS;
4622     NvU32 i;
4623 
4624     if (p->count > NVSWITCH_GET_INFO_COUNT_MAX)
4625     {
4626         NVSWITCH_PRINT(device, ERROR,
4627             "%s: Invalid args\n",
4628             __FUNCTION__);
4629         return -NVL_BAD_ARGS;
4630     }
4631 
4632     nvswitch_os_memset(p->info, 0, sizeof(NvU32)*NVSWITCH_GET_INFO_COUNT_MAX);
4633 
4634     for (i = 0; i < p->count; i++)
4635     {
4636         switch (p->index[i])
4637         {
4638             case NVSWITCH_GET_INFO_INDEX_ARCH:
4639                 p->info[i] = device->chip_arch;
4640                 break;
4641             case NVSWITCH_GET_INFO_INDEX_PLATFORM:
4642                 if (IS_RTLSIM(device))
4643                 {
4644                     p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_RTLSIM;
4645                 }
4646                 else if (IS_FMODEL(device))
4647                 {
4648                     p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_FMODEL;
4649                 }
4650                 else if (IS_EMULATION(device))
4651                 {
4652                     p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_EMULATION;
4653                 }
4654                 else
4655                 {
4656                     p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_SILICON;
4657                 }
4658                 break;
4659             case NVSWITCH_GET_INFO_INDEX_IMPL:
4660                 p->info[i] = device->chip_impl;
4661                 break;
4662             case NVSWITCH_GET_INFO_INDEX_CHIPID:
4663                 p->info[i] = _nvswitch_get_info_chip_id(device);
4664                 break;
4665             case NVSWITCH_GET_INFO_INDEX_REVISION_MAJOR:
4666                 p->info[i] = _nvswitch_get_info_revision_major(device);
4667                 break;
4668             case NVSWITCH_GET_INFO_INDEX_REVISION_MINOR:
4669                 p->info[i] = _nvswitch_get_info_revision_minor(device);
4670                 break;
4671             case NVSWITCH_GET_INFO_INDEX_REVISION_MINOR_EXT:
4672                 p->info[i] = _nvswitch_get_info_revision_minor_ext(device);
4673                 break;
4674             case NVSWITCH_GET_INFO_INDEX_DEVICE_ID:
4675                 p->info[i] = device->nvlink_device->pciInfo.pciDeviceId;
4676                 break;
4677             case NVSWITCH_GET_INFO_INDEX_NUM_PORTS:
4678                 p->info[i] = NVSWITCH_LINK_COUNT(device);
4679                 break;
4680             case NVSWITCH_GET_INFO_INDEX_ENABLED_PORTS_MASK_31_0:
4681                 p->info[i] = NvU64_LO32(nvswitch_get_enabled_link_mask(device));
4682                 break;
4683             case NVSWITCH_GET_INFO_INDEX_ENABLED_PORTS_MASK_63_32:
4684                 p->info[i] = NvU64_HI32(nvswitch_get_enabled_link_mask(device));
4685                 break;
4686             case NVSWITCH_GET_INFO_INDEX_NUM_VCS:
4687                 p->info[i] = _nvswitch_get_num_vcs_lr10(device);
4688                 break;
4689             case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_TABLE_SIZE:
4690                 {
4691                     NvU32 remap_ram_sel;
4692                     NvlStatus status;
4693 
4694                     status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_PRIMARY, &remap_ram_sel);
4695                     if (status == NVL_SUCCESS)
4696                     {
4697                         p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
4698                     }
4699                     else
4700                     {
4701                         p->info[i] = 0;
4702                     }
4703                 }
4704                 break;
4705             case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_EXTA_TABLE_SIZE:
4706                 {
4707                     NvU32 remap_ram_sel;
4708                     NvlStatus status;
4709 
4710                     status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_EXTA, &remap_ram_sel);
4711                     if (status == NVL_SUCCESS)
4712                     {
4713                         p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
4714                     }
4715                     else
4716                     {
4717                         p->info[i] = 0;
4718                     }
4719                 }
4720                 break;
4721             case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_EXTB_TABLE_SIZE:
4722                 {
4723                     NvU32 remap_ram_sel;
4724                     NvlStatus status;
4725 
4726                     status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_EXTB, &remap_ram_sel);
4727                     if (status == NVL_SUCCESS)
4728                     {
4729                         p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
4730                     }
4731                     else
4732                     {
4733                         p->info[i] = 0;
4734                     }
4735                 }
4736                 break;
4737             case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_MULTICAST_TABLE_SIZE:
4738                 {
4739                     NvU32 remap_ram_sel;
4740                     NvlStatus status;
4741 
4742                     status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_MULTICAST, &remap_ram_sel);
4743                     if (status == NVL_SUCCESS)
4744                     {
4745                         p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
4746                     }
4747                     else
4748                     {
4749                         p->info[i] = 0;
4750                     }
4751                 }
4752                 break;
4753             case NVSWITCH_GET_INFO_INDEX_ROUTING_ID_TABLE_SIZE:
4754                 p->info[i] = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM);
4755                 break;
4756             case NVSWITCH_GET_INFO_INDEX_ROUTING_LAN_TABLE_SIZE:
4757                 p->info[i] = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM);
4758                 break;
4759             case NVSWITCH_GET_INFO_INDEX_FREQ_KHZ:
4760                 p->info[i] = device->switch_pll.freq_khz;
4761                 break;
4762             case NVSWITCH_GET_INFO_INDEX_VCOFREQ_KHZ:
4763                 p->info[i] = device->switch_pll.vco_freq_khz;
4764                 break;
4765             case NVSWITCH_GET_INFO_INDEX_VOLTAGE_MVOLT:
4766                 retval = -NVL_ERR_NOT_SUPPORTED;
4767                 break;
4768             case NVSWITCH_GET_INFO_INDEX_PHYSICAL_ID:
4769                 p->info[i] = nvswitch_read_physical_id(device);
4770                 break;
4771             case NVSWITCH_GET_INFO_INDEX_PCI_DOMAIN:
4772                 p->info[i] = device->nvlink_device->pciInfo.domain;
4773                 break;
4774             case NVSWITCH_GET_INFO_INDEX_PCI_BUS:
4775                 p->info[i] = device->nvlink_device->pciInfo.bus;
4776                 break;
4777             case NVSWITCH_GET_INFO_INDEX_PCI_DEVICE:
4778                 p->info[i] = device->nvlink_device->pciInfo.device;
4779                 break;
4780             case NVSWITCH_GET_INFO_INDEX_PCI_FUNCTION:
4781                 p->info[i] = device->nvlink_device->pciInfo.function;
4782                 break;
4783             default:
4784                 NVSWITCH_PRINT(device, ERROR,
4785                     "%s: Undefined NVSWITCH_GET_INFO_INDEX 0x%x\n",
4786                     __FUNCTION__,
4787                     p->index[i]);
4788                 retval = -NVL_BAD_ARGS;
4789                 break;
4790         }
4791     }
4792 
4793     return retval;
4794 }
4795 
//
// Program the chip-specific NPORT port configuration for
// CTRL_NVSWITCH_SET_SWITCH_PORT_CONFIG: link type (access vs trunk),
// endpoint count, and the requester link/LAN ID.
//
//  device [in] nvswitch device
//  p      [in] port config parameters (portNum, type, count, requester IDs)
//
// Returns NVL_SUCCESS, or -NVL_BAD_ARGS on out-of-range requester IDs or
// an unknown type/count.
//
NvlStatus
nvswitch_set_nport_port_config_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_SWITCH_PORT_CONFIG *p
)
{
    NvU32   val;

    // Requester IDs must fit in their register fields.
    if (p->requesterLinkID > DRF_MASK(NV_NPORT_REQLINKID_REQROUTINGID))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Invalid requester RID 0x%x\n",
            __FUNCTION__, p->requesterLinkID);
        return -NVL_BAD_ARGS;
    }

    if (p->requesterLanID > DRF_MASK(NV_NPORT_REQLINKID_REQROUTINGLAN))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Invalid requester RLAN 0x%x\n",
            __FUNCTION__, p->requesterLanID);
        return -NVL_BAD_ARGS;
    }

    // Read-modify-write NPORT_CTRL: trunk-link enable and endpoint count.
    // NOTE(review): read uses NVSWITCH_LINK_RD32_LR10 while the writes
    // below use NVSWITCH_LINK_WR32 -- confirm the macro asymmetry is benign.
    val = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _NPORT, _CTRL);
    switch (p->type)
    {
        case CONNECT_ACCESS_GPU:
        case CONNECT_ACCESS_CPU:
        case CONNECT_ACCESS_SWITCH:
            val = FLD_SET_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _ACCESSLINK, val);
            break;
        case CONNECT_TRUNK_SWITCH:
            val = FLD_SET_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _TRUNKLINK, val);
            break;
        default:
            NVSWITCH_PRINT(device, ERROR,
                "%s: invalid type #%d\n",
                __FUNCTION__, p->type);
            return -NVL_BAD_ARGS;
    }

    switch(p->count)
    {
        case CONNECT_COUNT_512:
            val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _512, val);
            break;
        case CONNECT_COUNT_1024:
            val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _1024, val);
            break;
        case CONNECT_COUNT_2048:
            val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _2048, val);
            break;
        default:
            NVSWITCH_PRINT(device, ERROR,
                "%s: invalid count #%d\n",
                __FUNCTION__, p->count);
            return -NVL_BAD_ARGS;
    }
    NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _CTRL, val);

    // Program the requester routing ID and routing LAN fields together.
    NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _REQLINKID,
        DRF_NUM(_NPORT, _REQLINKID, _REQROUTINGID, p->requesterLinkID) |
        DRF_NUM(_NPORT, _REQLINKID, _REQROUTINGLAN, p->requesterLanID));

    return NVL_SUCCESS;
}
4864 
//
// Handle CTRL_NVSWITCH_SET_SWITCH_PORT_CONFIG: validate the request,
// program the NPORT port configuration, and record/program the link's
// AC/DC coupling mode.
//
//  device [in] nvswitch device
//  p      [in] port config parameters
//
// Returns NVL_SUCCESS, -NVL_BAD_ARGS on invalid port/VC1/AC-coupling
// requests, or -NVL_ERR_INVALID_STATE if the link object is missing.
//
NvlStatus
nvswitch_ctrl_set_switch_port_config_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_SWITCH_PORT_CONFIG *p
)
{
    nvlink_link *link;
    NvU32 val;
    NvlStatus status;

    if (!NVSWITCH_IS_LINK_ENG_VALID(device, p->portNum, NPORT))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: invalid link #%d\n",
            __FUNCTION__, p->portNum);
        return -NVL_BAD_ARGS;
    }

    // VC1 is a trunk-link-only feature.
    if (p->enableVC1 && (p->type != CONNECT_TRUNK_SWITCH))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: VC1 only allowed on trunk links\n",
            __FUNCTION__);
        return -NVL_BAD_ARGS;
    }

    // Validate chip-specific NPORT settings and program port config settings.
    status = nvswitch_set_nport_port_config(device, p);
    if (status != NVL_SUCCESS)
    {
        return status;
    }

    link = nvswitch_get_link(device, (NvU8)p->portNum);
    if (link == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: invalid link\n",
            __FUNCTION__);
        return -NVL_ERR_INVALID_STATE;
    }

    //
    // If ac_coupled_mask is configured during nvswitch_create_link,
    // give preference to it.
    //
    if (device->regkeys.ac_coupled_mask  ||
        device->regkeys.ac_coupled_mask2 ||
        device->firmware.nvlink.link_ac_coupled_mask)
    {
        // The pre-configured coupling mode may not be changed via this API.
        if (link->ac_coupled != p->acCoupled)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: port[%d]: Unsupported AC coupled change (%s)\n",
                __FUNCTION__, p->portNum, p->acCoupled ? "AC" : "DC");
            return -NVL_BAD_ARGS;
        }
    }

    link->ac_coupled = p->acCoupled;

    // AC vs DC mode SYSTEM register
    if (link->ac_coupled)
    {
        //
        // In NVL3.0, ACMODE is handled by MINION in the INITPHASE1 command
        // Here we just setup the register with the proper info
        //
        val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK,
                _NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL);
        val = FLD_SET_DRF(_NVLIPT_LNK,
                _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _AC_DC_MODE, _AC, val);
        NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK,
                _NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, val);
    }

    // If _BUFFER_RDY is asserted, credits are locked.
    val = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _NPORT, _CTRL_BUFFER_READY);
    if (FLD_TEST_DRF(_NPORT, _CTRL_BUFFER_READY, _BUFFERRDY, _ENABLE, val))
    {
        // Not an error: the port is already live, so just report success.
        NVSWITCH_PRINT(device, SETUP,
            "%s: port[%d]: BUFFERRDY already enabled.\n",
            __FUNCTION__, p->portNum);
        return NVL_SUCCESS;
    }

    return NVL_SUCCESS;
}
4954 
//
// CTRL_NVSWITCH_SET_INGRESS_REQUEST_TABLE is not supported on LR10.
//
NvlStatus
nvswitch_ctrl_set_ingress_request_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_INGRESS_REQUEST_TABLE *p
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
4964 
//
// CTRL_NVSWITCH_GET_INGRESS_REQUEST_TABLE is not supported on LR10.
//
NvlStatus
nvswitch_ctrl_get_ingress_request_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_INGRESS_REQUEST_TABLE_PARAMS *params
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
4974 
//
// CTRL_NVSWITCH_SET_INGRESS_REQUEST_VALID is not supported on LR10.
//
NvlStatus
nvswitch_ctrl_set_ingress_request_valid_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_INGRESS_REQUEST_VALID *p
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
4984 
//
// CTRL_NVSWITCH_GET_INGRESS_RESPONSE_TABLE is not supported on LR10.
//
NvlStatus
nvswitch_ctrl_get_ingress_response_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_INGRESS_RESPONSE_TABLE_PARAMS *params
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
4994 
4995 
//
// CTRL_NVSWITCH_SET_INGRESS_RESPONSE_TABLE is not supported on LR10.
//
NvlStatus
nvswitch_ctrl_set_ingress_response_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_INGRESS_RESPONSE_TABLE *p
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
5005 
//
// CTRL_NVSWITCH_SET_GANGED_LINK_TABLE is not supported on LR10.
//
static NvlStatus
nvswitch_ctrl_set_ganged_link_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_GANGED_LINK_TABLE *p
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
5015 
//
// Report the accumulated per-port egress latency histogram for one VC,
// then reset the accumulators (read-and-clear semantics).
//
//  device   [in]     nvswitch device
//  pLatency [in/out] vc_selector on input; histogram, elapsed time, and
//                    the (preserved) vc_selector on output
//
// Returns NVL_SUCCESS, or -NVL_BAD_ARGS for an out-of-range VC selector.
//
static NvlStatus
nvswitch_ctrl_get_internal_latency_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_INTERNAL_LATENCY *pLatency
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32 vc_selector = pLatency->vc_selector;
    NvU32 idx_nport;

    // Validate VC selector
    if (vc_selector >= NVSWITCH_NUM_VCS_LR10)
    {
        return -NVL_BAD_ARGS;
    }

    // Zero the output struct, then restore the caller's VC selector.
    nvswitch_os_memset(pLatency, 0, sizeof(*pLatency));
    pLatency->vc_selector = vc_selector;

    // Copy the software-accumulated histogram for each valid NPORT.
    for (idx_nport=0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++)
    {
        if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, idx_nport))
        {
            continue;
        }

        pLatency->egressHistogram[idx_nport].low =
            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].low;
        pLatency->egressHistogram[idx_nport].medium =
            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].medium;
        pLatency->egressHistogram[idx_nport].high =
           chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].high;
        pLatency->egressHistogram[idx_nport].panic =
           chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].panic;
        pLatency->egressHistogram[idx_nport].count =
           chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].count;
    }

    // Elapsed time covered by this sample, in milliseconds.
    pLatency->elapsed_time_msec =
      (chip_device->latency_stats->latency[vc_selector].last_read_time_nsec -
       chip_device->latency_stats->latency[vc_selector].start_time_nsec)/1000000ULL;

    // Start the next sampling window at the last read time.
    chip_device->latency_stats->latency[vc_selector].start_time_nsec =
        chip_device->latency_stats->latency[vc_selector].last_read_time_nsec;

    chip_device->latency_stats->latency[vc_selector].count = 0;

    // Clear accum_latency[]
    for (idx_nport = 0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++)
    {
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].low = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].medium = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].high = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].panic = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].count = 0;
    }

    return NVL_SUCCESS;
}
5076 
//
// Program the per-VC latency histogram bin thresholds.
//
// Thresholds are validated (presumably in nanoseconds, per the conversion
// below -- confirm units against the caller), converted in place to switch
// clock cycles, and written to the broadcast PORTSTAT limit registers.
//
//  device   [in]     nvswitch device
//  pLatency [in/out] bin thresholds per VC; NOTE: mutated in place by the
//                    ns-to-clock conversion, so the caller's struct is
//                    modified on return
//
// Returns NVL_SUCCESS, or -NVL_BAD_ARGS if any threshold is out of range
// or the low/med/high ordering is violated.
//
NvlStatus
nvswitch_ctrl_set_latency_bins_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_LATENCY_BINS *pLatency
)
{
    NvU32 vc_selector;
    const NvU32 freq_mhz = 1330;
    const NvU32 switchpll_hz = freq_mhz * 1000000ULL; // TODO: Update this with device->switch_pll.freq_khz after LR10 PLL update
    const NvU32 min_threshold = 10;   // Must be > zero to avoid div by zero
    const NvU32 max_threshold = 10000;

    // Quick input validation and ns to register value conversion
    for (vc_selector = 0; vc_selector < NVSWITCH_NUM_VCS_LR10; vc_selector++)
    {
        if ((pLatency->bin[vc_selector].lowThreshold > max_threshold)                           ||
            (pLatency->bin[vc_selector].lowThreshold < min_threshold)                           ||
            (pLatency->bin[vc_selector].medThreshold > max_threshold)                           ||
            (pLatency->bin[vc_selector].medThreshold < min_threshold)                           ||
            (pLatency->bin[vc_selector].hiThreshold  > max_threshold)                           ||
            (pLatency->bin[vc_selector].hiThreshold  < min_threshold)                           ||
            (pLatency->bin[vc_selector].lowThreshold > pLatency->bin[vc_selector].medThreshold) ||
            (pLatency->bin[vc_selector].medThreshold > pLatency->bin[vc_selector].hiThreshold))
        {
            return -NVL_BAD_ARGS;
        }

        // Convert each threshold to clock cycles: hz / (1e9 / threshold_ns).
        pLatency->bin[vc_selector].lowThreshold =
            switchpll_hz / (1000000000 / pLatency->bin[vc_selector].lowThreshold);
        pLatency->bin[vc_selector].medThreshold =
            switchpll_hz / (1000000000 / pLatency->bin[vc_selector].medThreshold);
        pLatency->bin[vc_selector].hiThreshold =
            switchpll_hz / (1000000000 / pLatency->bin[vc_selector].hiThreshold);

        // Broadcast the converted limits to all ports for this VC.
        NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _LOW,    vc_selector, pLatency->bin[vc_selector].lowThreshold);
        NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _MEDIUM, vc_selector, pLatency->bin[vc_selector].medThreshold);
        NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _HIGH,   vc_selector, pLatency->bin[vc_selector].hiThreshold);
    }

    return NVL_SUCCESS;
}
5119 
// Upper REQROUTINGLAN bit(s) that extend REQROUTINGID when the endpoint
// count is raised beyond the default.
#define NV_NPORT_REQLINKID_REQROUTINGLAN_1024  18:18
#define NV_NPORT_REQLINKID_REQROUTINGLAN_2048  18:17

/*
 * @brief Returns the ingress requester link id.
 *
 * On LR10, REQROUTINGID only gives the endpoint but not the specific port of the response packet.
 * To identify the specific port, the routing_ID must be appended with the upper bits of REQROUTINGLAN.
 *
 * When NV_NPORT_CTRL_ENDPOINT_COUNT = 1024, the upper bit of NV_NPORT_REQLINKID_REQROUTINGLAN become REQROUTINGID[9].
 * When NV_NPORT_CTRL_ENDPOINT_COUNT = 2048, the upper two bits of NV_NPORT_REQLINKID_REQROUTINGLAN become REQROUTINGID[10:9].
 *
 * @param[in] device            nvswitch device
 * @param[in] params            NVSWITCH_GET_INGRESS_REQLINKID_PARAMS
 *
 * @returns                     NVL_SUCCESS if action succeeded,
 *                              -NVL_ERR_INVALID_STATE invalid link
 */
NvlStatus
nvswitch_ctrl_get_ingress_reqlinkid_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_INGRESS_REQLINKID_PARAMS *params
)
{
    NvU32 regval;
    NvU32 reqRid;
    NvU32 reqRlan;
    // Bit position just above the REQROUTINGID field, where the extra
    // REQROUTINGLAN bits are appended.
    NvU32 rlan_shift = DRF_SHIFT_RT(NV_NPORT_REQLINKID_REQROUTINGID) + 1;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum))
    {
        return -NVL_BAD_ARGS;
    }

    regval = NVSWITCH_NPORT_RD32_LR10(device, params->portNum, _NPORT, _REQLINKID);
    reqRid = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGID, regval);
    reqRlan = regval;

    // The endpoint count setting decides how many RLAN bits extend the RID.
    regval = NVSWITCH_NPORT_RD32_LR10(device, params->portNum, _NPORT, _CTRL);
    if (FLD_TEST_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _1024, regval))
    {
        reqRlan = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGLAN_1024, reqRlan);
        params->requesterLinkID = (reqRid | (reqRlan << rlan_shift));
    }
    else if (FLD_TEST_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _2048, regval))
    {
        reqRlan = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGLAN_2048, reqRlan);
        params->requesterLinkID = (reqRid | (reqRlan << rlan_shift));
    }
    else
    {
        params->requesterLinkID = reqRid;
    }

    return NVL_SUCCESS;
}
5177 
5178 /*
5179  * REGISTER_READ/_WRITE
5180  * Provides direct access to the MMIO space for trusted clients like MODS.
5181  * This API should not be exposed to unsecure clients.
5182  */
5183 
5184 /*
5185  * _nvswitch_get_engine_base
5186  * Used by REGISTER_READ/WRITE API.  Looks up an engine based on device/instance
5187  * and returns the base address in BAR0.
5188  *
5189  * register_rw_engine   [in] REGISTER_RW_ENGINE_*
5190  * instance             [in] physical instance of device
5191  * bcast                [in] FALSE: find unicast base address
5192  *                           TRUE:  find broadcast base address
5193  * base_addr            [out] base address in BAR0 of requested device
5194  *
5195  * Returns              NVL_SUCCESS: Device base address successfully found
5196  *                      else device lookup failed
5197  */
5198 
5199 static NvlStatus
_nvswitch_get_engine_base_lr10(nvswitch_device * device,NvU32 register_rw_engine,NvU32 instance,NvBool bcast,NvU32 * base_addr)5200 _nvswitch_get_engine_base_lr10
5201 (
5202     nvswitch_device *device,
5203     NvU32   register_rw_engine,     // REGISTER_RW_ENGINE_*
5204     NvU32   instance,               // device instance
5205     NvBool  bcast,
5206     NvU32   *base_addr
5207 )
5208 {
5209     NvU32 base = 0;
5210     ENGINE_DESCRIPTOR_TYPE_LR10  *engine = NULL;
5211     NvlStatus retval = NVL_SUCCESS;
5212     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
5213 
5214     // Find the engine descriptor matching the request
5215     engine = NULL;
5216 
5217     switch (register_rw_engine)
5218     {
5219         case REGISTER_RW_ENGINE_RAW:
5220             // Special case raw IO
5221             if ((instance != 0) ||
5222                 (bcast != NV_FALSE))
5223             {
5224                 retval = -NVL_BAD_ARGS;
5225             }
5226         break;
5227 
5228         case REGISTER_RW_ENGINE_CLKS:
5229         case REGISTER_RW_ENGINE_FUSE:
5230         case REGISTER_RW_ENGINE_JTAG:
5231         case REGISTER_RW_ENGINE_PMGR:
5232         case REGISTER_RW_ENGINE_XP3G:
5233             //
5234             // Legacy devices are always single-instance, unicast-only.
5235             // These manuals are BAR0 offset-based, not IP-based.  Treat them
5236             // the same as RAW.
5237             //
5238             if ((instance != 0) ||
5239                 (bcast != NV_FALSE))
5240             {
5241                 retval = -NVL_BAD_ARGS;
5242             }
5243             register_rw_engine = REGISTER_RW_ENGINE_RAW;
5244         break;
5245 
5246         case REGISTER_RW_ENGINE_SAW:
5247             if (bcast)
5248             {
5249                 retval = -NVL_BAD_ARGS;
5250             }
5251             else
5252             {
5253                 if (NVSWITCH_ENG_VALID_LR10(device, SAW, instance))
5254                 {
5255                     engine = &chip_device->engSAW[instance];
5256                 }
5257             }
5258         break;
5259 
5260         case REGISTER_RW_ENGINE_XVE:
5261             if (bcast)
5262             {
5263                 retval = -NVL_BAD_ARGS;
5264             }
5265             else
5266             {
5267                 if (NVSWITCH_ENG_VALID_LR10(device, XVE, instance))
5268                 {
5269                     engine = &chip_device->engXVE[instance];
5270                 }
5271             }
5272         break;
5273 
5274         case REGISTER_RW_ENGINE_SOE:
5275             if (bcast)
5276             {
5277                 retval = -NVL_BAD_ARGS;
5278             }
5279             else
5280             {
5281                 if (NVSWITCH_ENG_VALID_LR10(device, SOE, instance))
5282                 {
5283                     engine = &chip_device->engSOE[instance];
5284                 }
5285             }
5286         break;
5287 
5288         case REGISTER_RW_ENGINE_SE:
5289             if (bcast)
5290             {
5291                 retval = -NVL_BAD_ARGS;
5292             }
5293             else
5294             {
5295                 if (NVSWITCH_ENG_VALID_LR10(device, SE, instance))
5296                 {
5297                     engine = &chip_device->engSE[instance];
5298                 }
5299             }
5300         break;
5301 
5302         case REGISTER_RW_ENGINE_NVLW:
5303             if (bcast)
5304             {
5305                 if (NVSWITCH_ENG_VALID_LR10(device, NVLW_BCAST, instance))
5306                 {
5307                     engine = &chip_device->engNVLW_BCAST[instance];
5308                 }
5309             }
5310             else
5311             {
5312                 if (NVSWITCH_ENG_VALID_LR10(device, NVLW, instance))
5313                 {
5314                     engine = &chip_device->engNVLW[instance];
5315                 }
5316             }
5317         break;
5318 
5319         case REGISTER_RW_ENGINE_MINION:
5320             if (bcast)
5321             {
5322                 if (NVSWITCH_ENG_VALID_LR10(device, MINION_BCAST, instance))
5323                 {
5324                     engine = &chip_device->engMINION_BCAST[instance];
5325                 }
5326             }
5327             else
5328             {
5329                 if (NVSWITCH_ENG_VALID_LR10(device, MINION, instance))
5330                 {
5331                     engine = &chip_device->engMINION[instance];
5332                 }
5333             }
5334         break;
5335 
5336         case REGISTER_RW_ENGINE_NVLIPT:
5337             if (bcast)
5338             {
5339                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_BCAST, instance))
5340                 {
5341                     engine = &chip_device->engNVLIPT_BCAST[instance];
5342                 }
5343             }
5344             else
5345             {
5346                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT, instance))
5347                 {
5348                     engine = &chip_device->engNVLIPT[instance];
5349                 }
5350             }
5351         break;
5352 
5353         case REGISTER_RW_ENGINE_NVLTLC:
5354             if (bcast)
5355             {
5356                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC_BCAST, instance))
5357                 {
5358                     engine = &chip_device->engNVLTLC_BCAST[instance];
5359                 }
5360             }
5361             else
5362             {
5363                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC, instance))
5364                 {
5365                     engine = &chip_device->engNVLTLC[instance];
5366                 }
5367             }
5368         break;
5369 
5370         case REGISTER_RW_ENGINE_NVLTLC_MULTICAST:
5371             if (bcast)
5372             {
5373                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC_MULTICAST_BCAST, instance))
5374                 {
5375                     engine = &chip_device->engNVLTLC_MULTICAST_BCAST[instance];
5376                 }
5377             }
5378             else
5379             {
5380                 if (NVSWITCH_ENG_VALID_LR10(device, NVLTLC_MULTICAST, instance))
5381                 {
5382                     engine = &chip_device->engNVLTLC_MULTICAST[instance];
5383                 }
5384             }
5385         break;
5386 
5387         case REGISTER_RW_ENGINE_NPG:
5388             if (bcast)
5389             {
5390                 if (NVSWITCH_ENG_VALID_LR10(device, NPG_BCAST, instance))
5391                 {
5392                     engine = &chip_device->engNPG_BCAST[instance];
5393                 }
5394             }
5395             else
5396             {
5397                 if (NVSWITCH_ENG_VALID_LR10(device, NPG, instance))
5398                 {
5399                     engine = &chip_device->engNPG[instance];
5400                 }
5401             }
5402         break;
5403 
5404         case REGISTER_RW_ENGINE_NPORT:
5405             if (bcast)
5406             {
5407                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT_BCAST, instance))
5408                 {
5409                     engine = &chip_device->engNPORT_BCAST[instance];
5410                 }
5411             }
5412             else
5413             {
5414                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT, instance))
5415                 {
5416                     engine = &chip_device->engNPORT[instance];
5417                 }
5418             }
5419         break;
5420 
5421         case REGISTER_RW_ENGINE_NPORT_MULTICAST:
5422             if (bcast)
5423             {
5424                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT_MULTICAST_BCAST, instance))
5425                 {
5426                     engine = &chip_device->engNPORT_MULTICAST_BCAST[instance];
5427                 }
5428             }
5429             else
5430             {
5431                 if (NVSWITCH_ENG_VALID_LR10(device, NPORT_MULTICAST, instance))
5432                 {
5433                     engine = &chip_device->engNPORT_MULTICAST[instance];
5434                 }
5435             }
5436         break;
5437 
5438         case REGISTER_RW_ENGINE_NVLIPT_LNK:
5439             if (bcast)
5440             {
5441                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK_BCAST, instance))
5442                 {
5443                     engine = &chip_device->engNVLIPT_LNK_BCAST[instance];
5444                 }
5445             }
5446             else
5447             {
5448                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK, instance))
5449                 {
5450                     engine = &chip_device->engNVLIPT_LNK[instance];
5451                 }
5452             }
5453         break;
5454 
5455         case REGISTER_RW_ENGINE_NVLIPT_LNK_MULTICAST:
5456             if (bcast)
5457             {
5458                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK_MULTICAST_BCAST, instance))
5459                 {
5460                     engine = &chip_device->engNVLIPT_LNK_MULTICAST_BCAST[instance];
5461                 }
5462             }
5463             else
5464             {
5465                 if (NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK_MULTICAST, instance))
5466                 {
5467                     engine = &chip_device->engNVLIPT_LNK_MULTICAST[instance];
5468                 }
5469             }
5470         break;
5471 
5472         case REGISTER_RW_ENGINE_PLL:
5473             if (bcast)
5474             {
5475                 if (NVSWITCH_ENG_VALID_LR10(device, PLL_BCAST, instance))
5476                 {
5477                     engine = &chip_device->engPLL_BCAST[instance];
5478                 }
5479             }
5480             else
5481             {
5482                 if (NVSWITCH_ENG_VALID_LR10(device, PLL, instance))
5483                 {
5484                     engine = &chip_device->engPLL[instance];
5485                 }
5486             }
5487         break;
5488 
5489         case REGISTER_RW_ENGINE_NVLDL:
5490             if (bcast)
5491             {
5492                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL_BCAST, instance))
5493                 {
5494                     engine = &chip_device->engNVLDL_BCAST[instance];
5495                 }
5496             }
5497             else
5498             {
5499                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL, instance))
5500                 {
5501                     engine = &chip_device->engNVLDL[instance];
5502                 }
5503             }
5504         break;
5505 
5506         case REGISTER_RW_ENGINE_NVLDL_MULTICAST:
5507             if (bcast)
5508             {
5509                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL_MULTICAST_BCAST, instance))
5510                 {
5511                     engine = &chip_device->engNVLDL_MULTICAST_BCAST[instance];
5512                 }
5513             }
5514             else
5515             {
5516                 if (NVSWITCH_ENG_VALID_LR10(device, NVLDL_MULTICAST, instance))
5517                 {
5518                     engine = &chip_device->engNVLDL_MULTICAST[instance];
5519                 }
5520             }
5521         break;
5522 
5523         case REGISTER_RW_ENGINE_NXBAR:
5524             if (bcast)
5525             {
5526                 if (NVSWITCH_ENG_VALID_LR10(device, NXBAR_BCAST, instance))
5527                 {
5528                     engine = &chip_device->engNXBAR_BCAST[instance];
5529                 }
5530             }
5531             else
5532             {
5533                 if (NVSWITCH_ENG_VALID_LR10(device, NXBAR, instance))
5534                 {
5535                     engine = &chip_device->engNXBAR[instance];
5536                 }
5537             }
5538         break;
5539 
5540         case REGISTER_RW_ENGINE_TILE:
5541             if (bcast)
5542             {
5543                 if (NVSWITCH_ENG_VALID_LR10(device, TILE_BCAST, instance))
5544                 {
5545                     engine = &chip_device->engTILE_BCAST[instance];
5546                 }
5547             }
5548             else
5549             {
5550                 if (NVSWITCH_ENG_VALID_LR10(device, TILE, instance))
5551                 {
5552                     engine = &chip_device->engTILE[instance];
5553                 }
5554             }
5555         break;
5556 
5557         case REGISTER_RW_ENGINE_TILE_MULTICAST:
5558             if (bcast)
5559             {
5560                 if (NVSWITCH_ENG_VALID_LR10(device, TILE_MULTICAST_BCAST, instance))
5561                 {
5562                     engine = &chip_device->engTILE_MULTICAST_BCAST[instance];
5563                 }
5564             }
5565             else
5566             {
5567                 if (NVSWITCH_ENG_VALID_LR10(device, TILE_MULTICAST, instance))
5568                 {
5569                     engine = &chip_device->engTILE_MULTICAST[instance];
5570                 }
5571             }
5572         break;
5573 
5574         default:
5575             NVSWITCH_PRINT(device, ERROR,
5576                 "%s: unknown REGISTER_RW_ENGINE 0x%x\n",
5577                 __FUNCTION__,
5578                 register_rw_engine);
5579             engine = NULL;
5580         break;
5581     }
5582 
5583     if (register_rw_engine == REGISTER_RW_ENGINE_RAW)
5584     {
5585         // Raw IO -- client provides full BAR0 offset
5586         base = 0;
5587     }
5588     else
5589     {
5590         // Check engine descriptor was found and valid
5591         if (engine == NULL)
5592         {
5593             retval = -NVL_BAD_ARGS;
5594             NVSWITCH_PRINT(device, ERROR,
5595                 "%s: invalid REGISTER_RW_ENGINE/instance 0x%x(%d)\n",
5596                 __FUNCTION__,
5597                 register_rw_engine,
5598                 instance);
5599         }
5600         else if (!engine->valid)
5601         {
5602             retval = -NVL_UNBOUND_DEVICE;
5603             NVSWITCH_PRINT(device, ERROR,
5604                 "%s: REGISTER_RW_ENGINE/instance 0x%x(%d) disabled or invalid\n",
5605                 __FUNCTION__,
5606                 register_rw_engine,
5607                 instance);
5608         }
5609         else
5610         {
5611             if (bcast && (engine->disc_type == DISCOVERY_TYPE_BROADCAST))
5612             {
5613                 //
5614                 // Caveat emptor: A read of a broadcast register is
5615                 // implementation-specific.
5616                 //
5617                 base = engine->info.bc.bc_addr;
5618             }
5619             else if ((!bcast) && (engine->disc_type == DISCOVERY_TYPE_UNICAST))
5620             {
5621                 base = engine->info.uc.uc_addr;
5622             }
5623 
5624             if (base == 0)
5625             {
5626                 NVSWITCH_PRINT(device, ERROR,
5627                     "%s: REGISTER_RW_ENGINE/instance 0x%x(%d) has %s base address 0!\n",
5628                     __FUNCTION__,
5629                     register_rw_engine,
5630                     instance,
5631                     (bcast ? "BCAST" : "UNICAST" ));
5632                 retval = -NVL_IO_ERROR;
5633             }
5634         }
5635     }
5636 
5637     *base_addr = base;
5638     return retval;
5639 }
5640 
5641 /*
5642  * CTRL_NVSWITCH_REGISTER_READ
5643  *
5644  * This provides direct access to the MMIO space for trusted clients like
5645  * MODS.
 * This API should not be exposed to untrusted clients.
5647  */
5648 
5649 static NvlStatus
nvswitch_ctrl_register_read_lr10(nvswitch_device * device,NVSWITCH_REGISTER_READ * p)5650 nvswitch_ctrl_register_read_lr10
5651 (
5652     nvswitch_device *device,
5653     NVSWITCH_REGISTER_READ *p
5654 )
5655 {
5656     NvU32 base;
5657     NvU32 data;
5658     NvlStatus retval = NVL_SUCCESS;
5659 
5660     retval = _nvswitch_get_engine_base_lr10(device, p->engine, p->instance, NV_FALSE, &base);
5661     if (retval != NVL_SUCCESS)
5662     {
5663         return retval;
5664     }
5665 
5666     // Make sure target offset isn't out-of-range
5667     if ((base + p->offset) >= device->nvlink_device->pciInfo.bars[0].barSize)
5668     {
5669         return -NVL_IO_ERROR;
5670     }
5671 
5672     //
5673     // Some legacy device manuals are not 0-based (IP style).
5674     //
5675     data = NVSWITCH_OFF_RD32(device, base + p->offset);
5676     p->val = data;
5677 
5678     return NVL_SUCCESS;
5679 }
5680 
5681 /*
5682  * CTRL_NVSWITCH_REGISTER_WRITE
5683  *
5684  * This provides direct access to the MMIO space for trusted clients like
5685  * MODS.
 * This API should not be exposed to untrusted clients.
5687  */
5688 
5689 static NvlStatus
nvswitch_ctrl_register_write_lr10(nvswitch_device * device,NVSWITCH_REGISTER_WRITE * p)5690 nvswitch_ctrl_register_write_lr10
5691 (
5692     nvswitch_device *device,
5693     NVSWITCH_REGISTER_WRITE *p
5694 )
5695 {
5696     NvU32 base;
5697     NvlStatus retval = NVL_SUCCESS;
5698 
5699     retval = _nvswitch_get_engine_base_lr10(device, p->engine, p->instance, p->bcast, &base);
5700     if (retval != NVL_SUCCESS)
5701     {
5702         return retval;
5703     }
5704 
5705     // Make sure target offset isn't out-of-range
5706     if ((base + p->offset) >= device->nvlink_device->pciInfo.bars[0].barSize)
5707     {
5708         return -NVL_IO_ERROR;
5709     }
5710 
5711     //
5712     // Some legacy device manuals are not 0-based (IP style).
5713     //
5714     NVSWITCH_OFF_WR32(device, base + p->offset, p->val);
5715 
5716     return NVL_SUCCESS;
5717 }
5718 
5719 NvlStatus
nvswitch_ctrl_get_bios_info_lr10(nvswitch_device * device,NVSWITCH_GET_BIOS_INFO_PARAMS * p)5720 nvswitch_ctrl_get_bios_info_lr10
5721 (
5722     nvswitch_device *device,
5723     NVSWITCH_GET_BIOS_INFO_PARAMS *p
5724 )
5725 {
5726     NvU32 biosVersionBytes;
5727     NvU32 biosOemVersionBytes;
5728     NvU32 biosMagic = 0x9210;
5729 
5730     //
5731     // Example: 92.10.09.00.00 is the formatted version string
5732     //          |         |  |
5733     //          |         |  |__ BIOS OEM version byte
5734     //          |         |
5735     //          |_________|_____ BIOS version bytes
5736     //
5737     biosVersionBytes = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_6);
5738     biosOemVersionBytes = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_7);
5739 
5740     //
5741     // LR10 is built out of core92 and the BIOS version will always begin with
5742     // 92.10.xx.xx.xx
5743     //
5744     if ((biosVersionBytes >> 16) != biosMagic)
5745     {
5746         NVSWITCH_PRINT(device, ERROR,
5747                 "BIOS version not found in scratch register\n");
5748         return -NVL_ERR_INVALID_STATE;
5749     }
5750 
5751     p->version = (((NvU64)biosVersionBytes) << 8) | (biosOemVersionBytes & 0xff);
5752 
5753     return NVL_SUCCESS;
5754 }
5755 
5756 NvlStatus
nvswitch_ctrl_get_inforom_version_lr10(nvswitch_device * device,NVSWITCH_GET_INFOROM_VERSION_PARAMS * p)5757 nvswitch_ctrl_get_inforom_version_lr10
5758 (
5759     nvswitch_device *device,
5760     NVSWITCH_GET_INFOROM_VERSION_PARAMS *p
5761 )
5762 {
5763 
5764     struct inforom *pInforom = device->pInforom;
5765 
5766     if ((pInforom == NULL) || (!pInforom->IMG.bValid))
5767     {
5768         return -NVL_ERR_NOT_SUPPORTED;
5769     }
5770 
5771     if (NV_ARRAY_ELEMENTS(pInforom->IMG.object.version) <
5772         NVSWITCH_INFOROM_VERSION_LEN)
5773     {
5774         NVSWITCH_PRINT(device, ERROR,
5775                        "Inforom IMG object struct smaller than expected\n");
5776         return -NVL_ERR_INVALID_STATE;
5777     }
5778 
5779     nvswitch_inforom_string_copy(pInforom->IMG.object.version, p->version,
5780                                  NVSWITCH_INFOROM_VERSION_LEN);
5781 
5782     return NVL_SUCCESS;
5783 }
5784 
5785 void
nvswitch_corelib_clear_link_state_lr10(nvlink_link * link)5786 nvswitch_corelib_clear_link_state_lr10
5787 (
5788     nvlink_link *link
5789 )
5790 {
5791     // Receiver Detect needs to happen again
5792     link->bRxDetected = NV_FALSE;
5793 
5794     // INITNEGOTIATE needs to happen again
5795     link->bInitnegotiateConfigGood = NV_FALSE;
5796 
5797     // TxCommonMode needs to happen again
5798     link->bTxCommonModeFail = NV_FALSE;
5799 
5800     // SAFE transition needs to happen again
5801     link->bSafeTransitionFail = NV_FALSE;
5802 
5803     // Reset the SW state tracking the link and sublink states
5804     link->state            = NVLINK_LINKSTATE_OFF;
5805     link->tx_sublink_state = NVLINK_SUBLINK_STATE_TX_OFF;
5806     link->rx_sublink_state = NVLINK_SUBLINK_STATE_RX_OFF;
5807 }
5808 
5809 const static NvU32 nport_reg_addr[] =
5810 {
5811     NV_NPORT_CTRL,
5812     NV_NPORT_CTRL_SLCG,
5813     NV_NPORT_REQLINKID,
5814     NV_NPORT_PORTSTAT_CONTROL,
5815     NV_NPORT_PORTSTAT_SNAP_CONTROL,
5816     NV_NPORT_PORTSTAT_WINDOW_LIMIT,
5817     NV_NPORT_PORTSTAT_LIMIT_LOW_0,
5818     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_0,
5819     NV_NPORT_PORTSTAT_LIMIT_HIGH_0,
5820     NV_NPORT_PORTSTAT_LIMIT_LOW_1,
5821     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1,
5822     NV_NPORT_PORTSTAT_LIMIT_HIGH_1,
5823     NV_NPORT_PORTSTAT_LIMIT_LOW_2,
5824     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_2,
5825     NV_NPORT_PORTSTAT_LIMIT_HIGH_2,
5826     NV_NPORT_PORTSTAT_LIMIT_LOW_3,
5827     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_3,
5828     NV_NPORT_PORTSTAT_LIMIT_HIGH_3,
5829     NV_NPORT_PORTSTAT_LIMIT_LOW_4,
5830     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_4,
5831     NV_NPORT_PORTSTAT_LIMIT_HIGH_4,
5832     NV_NPORT_PORTSTAT_LIMIT_LOW_5,
5833     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_5,
5834     NV_NPORT_PORTSTAT_LIMIT_HIGH_5,
5835     NV_NPORT_PORTSTAT_LIMIT_LOW_6,
5836     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_6,
5837     NV_NPORT_PORTSTAT_LIMIT_HIGH_6,
5838     NV_NPORT_PORTSTAT_LIMIT_LOW_7,
5839     NV_NPORT_PORTSTAT_LIMIT_MEDIUM_7,
5840     NV_NPORT_PORTSTAT_LIMIT_HIGH_7,
5841     NV_NPORT_PORTSTAT_SOURCE_FILTER_0,
5842     NV_NPORT_PORTSTAT_SOURCE_FILTER_1,
5843     NV_ROUTE_ROUTE_CONTROL,
5844     NV_ROUTE_CMD_ROUTE_TABLE0,
5845     NV_ROUTE_CMD_ROUTE_TABLE1,
5846     NV_ROUTE_CMD_ROUTE_TABLE2,
5847     NV_ROUTE_CMD_ROUTE_TABLE3,
5848     NV_ROUTE_ERR_LOG_EN_0,
5849     NV_ROUTE_ERR_CONTAIN_EN_0,
5850     NV_ROUTE_ERR_ECC_CTRL,
5851     NV_ROUTE_ERR_GLT_ECC_ERROR_COUNTER_LIMIT,
5852     NV_ROUTE_ERR_NVS_ECC_ERROR_COUNTER_LIMIT,
5853     NV_INGRESS_ERR_LOG_EN_0,
5854     NV_INGRESS_ERR_CONTAIN_EN_0,
5855     NV_INGRESS_ERR_ECC_CTRL,
5856     NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_COUNTER_LIMIT,
5857     NV_INGRESS_ERR_RIDTAB_ECC_ERROR_COUNTER_LIMIT,
5858     NV_INGRESS_ERR_RLANTAB_ECC_ERROR_COUNTER_LIMIT,
5859     NV_INGRESS_ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT,
5860     NV_EGRESS_CTRL,
5861     NV_EGRESS_CTO_TIMER_LIMIT,
5862     NV_EGRESS_ERR_LOG_EN_0,
5863     NV_EGRESS_ERR_CONTAIN_EN_0,
5864     NV_EGRESS_ERR_ECC_CTRL,
5865     NV_EGRESS_ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT,
5866     NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT,
5867     NV_TSTATE_TAGSTATECONTROL,
5868     NV_TSTATE_ATO_TIMER_LIMIT,
5869     NV_TSTATE_CREQ_CAM_LOCK,
5870     NV_TSTATE_ERR_LOG_EN_0,
5871     NV_TSTATE_ERR_CONTAIN_EN_0,
5872     NV_TSTATE_ERR_ECC_CTRL,
5873     NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5874     NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT,
5875     NV_TSTATE_ERR_TD_TID_RAM_ECC_ERROR_COUNTER_LIMIT,
5876     NV_SOURCETRACK_CTRL,
5877     NV_SOURCETRACK_MULTISEC_TIMER0,
5878     NV_SOURCETRACK_ERR_LOG_EN_0,
5879     NV_SOURCETRACK_ERR_CONTAIN_EN_0,
5880     NV_SOURCETRACK_ERR_ECC_CTRL,
5881     NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5882     NV_SOURCETRACK_ERR_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5883     NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT,
5884 };
5885 
5886 /*
5887  *  Disable interrupts comming from NPG & NVLW blocks.
5888  */
static void
_nvswitch_link_disable_interrupts_lr10
(
    nvswitch_device *device,
    NvU32 link
)
{
    NvU32 i;

    // Mask the common NPORT error interrupts (correctable, fatal, and
    // non-fatal) for this link by writing all enables to 0.
    NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT,
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x0) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x0) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x0));

    // Mask all three NVLW per-link interrupt trees (INTR_0/1/2) for every
    // interrupt index: fatal, non-fatal, and correctable are all disabled.
    for (i = 0; i < NV_NVLCTRL_LINK_INTR_0_STATUS__SIZE_1; i++)
    {
        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_0_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _FATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _NONFATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _CORRECTABLE, 0x0));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_1_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _FATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _NONFATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _CORRECTABLE, 0x0));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_2_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _FATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _NONFATAL, 0x0) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _CORRECTABLE, 0x0));
    }
}
5921 
5922 /*
5923  *  Reset NPG & NVLW interrupt state.
5924  */
static void
_nvswitch_link_reset_interrupts_lr10
(
    nvswitch_device *device,
    NvU32 link
)
{
    lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
    NvU32 i;

    // Re-enable the common NPORT error interrupts (correctable, fatal,
    // and non-fatal) that _nvswitch_link_disable_interrupts_lr10 masked.
    NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT,
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x1) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x1) |
        DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x1));

    // Unmask all three NVLW per-link interrupt trees (INTR_0/1/2) for
    // every interrupt index.
    for (i = 0; i < NV_NVLCTRL_LINK_INTR_0_STATUS__SIZE_1; i++)
    {
        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_0_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _FATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _NONFATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _CORRECTABLE, 0x1));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_1_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _FATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _NONFATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _CORRECTABLE, 0x1));

        NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_2_MASK(i),
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _FATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _NONFATAL, 0x1) |
            DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _CORRECTABLE, 0x1));
    }

    // Enable interrupts which are disabled to prevent interrupt storm.
    // The report-enable masks come from the per-chip intr_mask snapshot.
    NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.route.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.route.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.ingress.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.ingress.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.egress.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.egress.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.tstate.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.tstate.nonfatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.sourcetrack.fatal);
    NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.sourcetrack.nonfatal);

    // Clear fatal error status
    device->link[link].fatal_error_occurred = NV_FALSE;
}
5973 
5974 /*
5975  * @Brief : Control to reset and drain the links.
5976  *
5977  * @param[in] device        A reference to the device to initialize
5978  * @param[in] linkMask      A mask of link(s) to be reset.
5979  *
5980  * @returns :               NVL_SUCCESS if there were no errors
5981  *                         -NVL_BAD_PARAMS if input parameters are wrong.
5982  *                         -NVL_ERR_INVALID_STATE if other errors are present and a full-chip reset is required.
5983  *                         -NVL_INITIALIZATION_TOTAL_FAILURE if NPORT initialization failed and a retry is required.
5984  */
5985 
5986 NvlStatus
nvswitch_reset_and_drain_links_lr10(nvswitch_device * device,NvU64 link_mask,NvBool bForced)5987 nvswitch_reset_and_drain_links_lr10
5988 (
5989     nvswitch_device *device,
5990     NvU64 link_mask,
5991     NvBool bForced
5992 )
5993 {
5994     NvlStatus status = -NVL_ERR_GENERIC;
5995     nvlink_link *link_info;
5996     NvU32 val;
5997     NvU32 link;
5998     NvU32 idx_nport;
5999     NvU32 npg;
6000     NVSWITCH_TIMEOUT timeout;
6001     NvBool           keepPolling;
6002     NvU32 i;
6003     NvU64 link_mode, tx_sublink_mode, rx_sublink_mode;
6004     NvU32 tx_sublink_submode, rx_sublink_submode;
6005     NvU32 *nport_reg_val = NULL;
6006     NvU32 reg_count = NV_ARRAY_ELEMENTS(nport_reg_addr);
6007     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
6008 
6009     if ((link_mask == 0) ||
6010         (link_mask >> NVSWITCH_LINK_COUNT(device)))
6011     {
6012         NVSWITCH_PRINT(device, ERROR,
6013             "%s: Invalid link_mask = 0x%llx\n",
6014             __FUNCTION__, link_mask);
6015 
6016         return -NVL_BAD_ARGS;
6017     }
6018 
6019     // Check for in-active links
6020     FOR_EACH_INDEX_IN_MASK(64, link, link_mask)
6021     {
6022         if (!nvswitch_is_link_valid(device, link))
6023         {
6024             NVSWITCH_PRINT(device, ERROR,
6025                 "%s: link #%d invalid\n",
6026                 __FUNCTION__, link);
6027 
6028             continue;
6029         }
6030         if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, link))
6031         {
6032             NVSWITCH_PRINT(device, ERROR,
6033                 "%s: NPORT #%d invalid\n",
6034                 __FUNCTION__, link);
6035 
6036             continue;
6037         }
6038 
6039         if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLW, link))
6040         {
6041             NVSWITCH_PRINT(device, ERROR,
6042                 "%s: NVLW #%d invalid\n",
6043                 __FUNCTION__, link);
6044 
6045             continue;
6046         }
6047     }
6048     FOR_EACH_INDEX_IN_MASK_END;
6049 
6050     // Buffer to backup NPORT state
6051     nport_reg_val = nvswitch_os_malloc(sizeof(nport_reg_addr));
6052     if (nport_reg_val == NULL)
6053     {
6054         NVSWITCH_PRINT(device, ERROR,
6055             "%s: Failed to allocate memory\n",
6056             __FUNCTION__);
6057 
6058         return -NVL_NO_MEM;
6059     }
6060 
6061     FOR_EACH_INDEX_IN_MASK(64, link, link_mask)
6062     {
6063         // Unregister links to make them unusable while reset is in progress.
6064         link_info = nvswitch_get_link(device, link);
6065         if (link_info == NULL)
6066         {
6067             NVSWITCH_PRINT(device, ERROR,
6068                 "%s: invalid link %d\n",
6069                 __FUNCTION__, link);
6070             continue;
6071         }
6072 
6073         nvlink_lib_unregister_link(link_info);
6074 
6075         //
6076         // Step 0 :
6077         // Prior to starting port reset, FM must shutdown the NVlink links
6078         // it wishes to reset.
6079         // However, with shared-virtualization, FM is unable to shut down the links
6080         // since the GPU is no longer attached to the service VM.
6081         // In this case, we must perform unilateral shutdown on the LR10 side
6082         // of the link.
6083         //
6084         // If links are in OFF or RESET, we don't need to perform shutdown
6085         // If links already went through a proper pseudo-clean shutdown sequence,
6086         // they'll be in SAFE + sublinks in OFF
6087         //
6088 
6089         status = nvswitch_corelib_get_dl_link_mode_lr10(link_info, &link_mode);
6090         if (status != NVL_SUCCESS)
6091         {
6092             NVSWITCH_PRINT(device, ERROR,
6093                 "%s: Unable to get link mode from link %d\n",
6094                 __FUNCTION__, link);
6095             goto nvswitch_reset_and_drain_links_exit;
6096         }
6097         status = nvswitch_corelib_get_tx_mode_lr10(link_info, &tx_sublink_mode, &tx_sublink_submode);
6098         if (status != NVL_SUCCESS)
6099         {
6100             NVSWITCH_PRINT(device, ERROR,
6101                 "%s: Unable to get tx sublink mode from link %d\n",
6102                 __FUNCTION__, link);
6103             goto nvswitch_reset_and_drain_links_exit;
6104         }
6105         status = nvswitch_corelib_get_rx_mode_lr10(link_info, &rx_sublink_mode, &rx_sublink_submode);
6106         if (status != NVL_SUCCESS)
6107         {
6108             NVSWITCH_PRINT(device, ERROR,
6109                 "%s: Unable to get rx sublink mode from link %d\n",
6110                 __FUNCTION__, link);
6111             goto nvswitch_reset_and_drain_links_exit;
6112         }
6113 
6114         if (!((link_mode == NVLINK_LINKSTATE_RESET) ||
6115               (link_mode == NVLINK_LINKSTATE_OFF) ||
6116               ((link_mode == NVLINK_LINKSTATE_SAFE) &&
6117                (tx_sublink_mode == NVLINK_SUBLINK_STATE_TX_OFF) &&
6118                (rx_sublink_mode == NVLINK_SUBLINK_STATE_RX_OFF))))
6119         {
6120             nvswitch_execute_unilateral_link_shutdown_lr10(link_info);
6121             nvswitch_corelib_clear_link_state_lr10(link_info);
6122         }
6123 
6124         //
6125         // Step 1 : Perform surgical reset
6126         // Refer to switch IAS 11.5.2 Link Reset.
6127         //
6128 
6129         // Step 1.a : Backup NPORT state before reset
6130         for (i = 0; i < reg_count; i++)
6131         {
6132             nport_reg_val[i] = NVSWITCH_ENG_OFF_RD32(device, NPORT, _UNICAST, link,
6133                 nport_reg_addr[i]);
6134         }
6135 
6136         // Step 1.b : Assert INGRESS_STOP / EGRESS_STOP
6137         val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _CTRL_STOP);
6138         val = FLD_SET_DRF(_NPORT, _CTRL_STOP, _INGRESS_STOP, _STOP, val);
6139         val = FLD_SET_DRF(_NPORT, _CTRL_STOP, _EGRESS_STOP, _STOP, val);
6140         NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _CTRL_STOP, val);
6141 
6142         // Wait for stop operation to take effect at TLC.
6143         // Expected a minimum of 256 clk cycles.
6144         nvswitch_os_sleep(1);
6145 
6146         //
6147         // Step 1.c : Disable NPG & NVLW interrupts
6148         //
6149         _nvswitch_link_disable_interrupts_lr10(device, link);
6150 
6151         // Step 1.d : Assert NPortWarmReset
6152         npg = link / NVSWITCH_LINKS_PER_NPG;
6153         val = NVSWITCH_NPG_RD32_LR10(device, npg, _NPG, _WARMRESET);
6154 
6155         idx_nport = link % NVSWITCH_LINKS_PER_NPG;
6156         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _WARMRESET,
6157             DRF_NUM(_NPG, _WARMRESET, _NPORTWARMRESET, ~NVBIT(idx_nport)));
6158 
6159         // Step 1.e : Initiate Minion reset sequence.
6160         status = nvswitch_request_tl_link_state_lr10(link_info,
6161             NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET, NV_TRUE);
6162         if (status != NVL_SUCCESS)
6163         {
6164             NVSWITCH_PRINT(device, ERROR,
6165                 "%s: NvLink Reset has failed for link %d\n",
6166                 __FUNCTION__, link);
6167             goto nvswitch_reset_and_drain_links_exit;
6168         }
6169 
6170         // Step 1.e : De-assert NPortWarmReset
6171         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _WARMRESET, val);
6172 
6173         // Step 1.f : Assert and De-assert NPort debug_clear
6174         // to clear the error status
6175         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _DEBUG_CLEAR,
6176             DRF_NUM(_NPG, _DEBUG_CLEAR, _CLEAR, NVBIT(idx_nport)));
6177 
6178         NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _DEBUG_CLEAR,
6179             DRF_DEF(_NPG, _DEBUG_CLEAR, _CLEAR, _DEASSERT));
6180 
6181         // Step 1.g : Clear CONTAIN_AND_DRAIN to clear contain state (Bug 3115824)
6182         NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _CONTAIN_AND_DRAIN,
6183             DRF_DEF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE));
6184 
6185         val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _CONTAIN_AND_DRAIN);
6186         if (FLD_TEST_DRF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE, val))
6187         {
6188             NVSWITCH_PRINT(device, ERROR,
6189                 "%s: NPORT Contain and Drain Clear has failed for link %d\n",
6190                 __FUNCTION__, link);
6191             status = NVL_ERR_INVALID_STATE;
6192             goto nvswitch_reset_and_drain_links_exit;
6193         }
6194 
6195         //
6196         // Step 2 : Assert NPORT Reset after Control & Drain routine.
6197         //  Clear Tagpool, CrumbStore and CAM RAMs
6198         //
6199 
6200         // Step 2.a Clear Tagpool RAM
6201         NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _INITIALIZATION,
6202             DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT));
6203 
6204         nvswitch_timeout_create(25 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
6205 
6206         do
6207         {
6208             keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
6209 
6210             // Check if NPORT initialization is done
6211             val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _INITIALIZATION);
6212             if (FLD_TEST_DRF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT, val))
6213             {
6214                 break;
6215             }
6216 
6217             nvswitch_os_sleep(1);
6218         }
6219         while (keepPolling);
6220 
6221         if (!FLD_TEST_DRF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT, val))
6222         {
6223             NVSWITCH_PRINT(device, ERROR,
6224                 "%s: Timeout waiting for TAGPOOL Initialization on link %d)\n",
6225                 __FUNCTION__, link);
6226 
6227             status = -NVL_INITIALIZATION_TOTAL_FAILURE;
6228             goto nvswitch_reset_and_drain_links_exit;
6229         }
6230 
6231         // Step 2.b Clear CrumbStore RAM
6232         val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0) |
6233               DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CRUMBSTORE_RAM) |
6234               DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 1);
6235 
6236         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_ADDRESS, val);
6237         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA1, 0x0);
6238 
6239         val = DRF_NUM(_TSTATE, _RAM_DATA0, _ECC, 0x7f);
6240         for (i = 0; i <= NV_TSTATE_RAM_ADDRESS_ADDR_TAGPOOL_CRUMBSTORE_TDTID_DEPTH; i++)
6241         {
6242             NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA0, val);
6243         }
6244 
6245         // Step 2.c Clear CAM RAM
6246         val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0) |
6247               DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CREQ_CAM) |
6248               DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 1);
6249 
6250         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_ADDRESS, val);
6251         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA1, 0x0);
6252         NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA2, 0x0);
6253 
6254         for (i = 0; i <= NV_TSTATE_RAM_ADDRESS_ADDR_CREQ_CAM_DEPTH; i++)
6255         {
6256             NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA0, 0x0);
6257         }
6258 
6259         //
6260         // Step 3 : Restore link state
6261         //
6262 
6263         // Restore NPORT state after reset
6264         for (i = 0; i < reg_count; i++)
6265         {
6266             NVSWITCH_ENG_OFF_WR32(device, NPORT, _UNICAST, link,
6267                                   nport_reg_addr[i], nport_reg_val[i]);
6268         }
6269 
6270         // Initialize GLT
6271         nvswitch_set_ganged_link_table_lr10(device, 0, chip_device->ganged_link_table,
6272                                             ROUTE_GANG_TABLE_SIZE/2);
6273 
6274         // Initialize select scratch registers to 0x0
6275         nvswitch_init_scratch_lr10(device);
6276 
6277         // Reset NVLW and NPORT interrupt state
6278         _nvswitch_link_reset_interrupts_lr10(device, link);
6279 
6280         // Re-register links.
6281         status = nvlink_lib_register_link(device->nvlink_device, link_info);
6282         if (status != NVL_SUCCESS)
6283         {
6284             nvswitch_destroy_link(link_info);
6285             goto nvswitch_reset_and_drain_links_exit;
6286         }
6287     }
6288     FOR_EACH_INDEX_IN_MASK_END;
6289 
6290     // Launch ALI training if applicable
6291     (void)nvswitch_launch_ALI(device);
6292 
6293 nvswitch_reset_and_drain_links_exit:
6294     nvswitch_os_free(nport_reg_val);
6295     return status;
6296 }
6297 
NvlStatus
nvswitch_get_nvlink_ecc_errors_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_NVLINK_ECC_ERRORS_PARAMS *params
)
{
    NvU32 statData;
    NvU8 i, j;
    NvlStatus status;
    NvBool bLaneReversed;

    // Start from a clean slate so untouched links report zeroed state.
    nvswitch_os_memset(params->errorLink, 0, sizeof(params->errorLink));

    FOR_EACH_INDEX_IN_MASK(64, i, params->linkMask)
    {
        nvlink_link         *link;
        NVSWITCH_LANE_ERROR *errorLane;
        NvU8                offset;
        NvBool              minion_enabled;
        NvU32               sublinkWidth;

        link = nvswitch_get_link(device, i);

        // Reject the whole request if any requested link is absent,
        // lacks a valid NVLDL engine, or is out of range.
        if ((link == NULL) ||
            !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
            (i >= NVSWITCH_LINK_COUNT(device)))
        {
            return -NVL_BAD_ARGS;
        }

        sublinkWidth = device->hal.nvswitch_get_sublink_width(device, i);

        minion_enabled = nvswitch_is_minion_initialized(device,
            NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION));

        bLaneReversed = nvswitch_link_lane_reversed_lr10(device, link->linkNumber);

        for (j = 0; j < NVSWITCH_NVLINK_MAX_LANES; j++)
        {
            if (minion_enabled && (j < sublinkWidth))
            {
                // Per-lane ECC stats come from MINION DL status registers
                // NV_NVLSTAT_RX12 + lane index.
                status = nvswitch_minion_get_dl_status(device, i,
                                        (NV_NVLSTAT_RX12 + j), 0, &statData);

                if (status != NVL_SUCCESS)
                {
                    return status;
                }
                // When the lanes are physically reversed, map lane j onto
                // the mirrored logical position.
                offset = bLaneReversed ? ((sublinkWidth - 1) - j) : j;
                errorLane                = &params->errorLink[i].errorLane[offset];
                errorLane->valid         = NV_TRUE;
            }
            else
            {
                // MINION disabled
                statData                 = 0;
                offset                   = j;
                errorLane                = &params->errorLink[i].errorLane[offset];
                errorLane->valid         = NV_FALSE;
            }

            // Extract the corrected-error count and its overflow flag.
            errorLane->eccErrorValue = DRF_VAL(_NVLSTAT, _RX12, _ECC_CORRECTED_ERR_L0_VALUE, statData);
            errorLane->overflowed    = DRF_VAL(_NVLSTAT, _RX12, _ECC_CORRECTED_ERR_L0_OVER, statData);
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}
6368 
6369 static NvU32
nvswitch_get_num_links_lr10(nvswitch_device * device)6370 nvswitch_get_num_links_lr10
6371 (
6372     nvswitch_device *device
6373 )
6374 {
6375     NvU32 num_links = NVSWITCH_NUM_LINKS_LR10;
6376     return num_links;
6377 }
6378 
6379 static NvU8
nvswitch_get_num_links_per_nvlipt_lr10(nvswitch_device * device)6380 nvswitch_get_num_links_per_nvlipt_lr10
6381 (
6382     nvswitch_device *device
6383 )
6384 {
6385     return NVSWITCH_LINKS_PER_NVLIPT;
6386 }
6387 
6388 NvBool
nvswitch_is_link_valid_lr10(nvswitch_device * device,NvU32 link_id)6389 nvswitch_is_link_valid_lr10
6390 (
6391     nvswitch_device *device,
6392     NvU32            link_id
6393 )
6394 {
6395     if (link_id >= nvswitch_get_num_links(device))
6396     {
6397         return NV_FALSE;
6398     }
6399     return device->link[link_id].valid;
6400 }
6401 
6402 NvlStatus
nvswitch_ctrl_get_fom_values_lr10(nvswitch_device * device,NVSWITCH_GET_FOM_VALUES_PARAMS * p)6403 nvswitch_ctrl_get_fom_values_lr10
6404 (
6405     nvswitch_device *device,
6406     NVSWITCH_GET_FOM_VALUES_PARAMS *p
6407 )
6408 {
6409     NvlStatus status;
6410     NvU32     statData;
6411     nvlink_link *link;
6412 
6413     link = nvswitch_get_link(device, p->linkId);
6414     if (link == NULL)
6415     {
6416         NVSWITCH_PRINT(device, ERROR, "%s: link #%d invalid\n",
6417             __FUNCTION__, p->linkId);
6418         return -NVL_BAD_ARGS;
6419     }
6420 
6421     status = nvswitch_minion_get_dl_status(device, p->linkId,
6422                                         NV_NVLSTAT_TR16, 0, &statData);
6423     p->figureOfMeritValues[0] = (NvU16) (statData & 0xFFFF);
6424     p->figureOfMeritValues[1] = (NvU16) ((statData >> 16) & 0xFFFF);
6425 
6426     status = nvswitch_minion_get_dl_status(device, p->linkId,
6427                                         NV_NVLSTAT_TR17, 0, &statData);
6428     p->figureOfMeritValues[2] = (NvU16) (statData & 0xFFFF);
6429     p->figureOfMeritValues[3] = (NvU16) ((statData >> 16) & 0xFFFF);
6430 
6431     p->numLanes = nvswitch_get_sublink_width(device, p->linkId);
6432 
6433     return status;
6434 }
6435 
//
// Record a fatal error against a link (or the whole device) and flag the
// required reset scope in scratch registers that survive the error.
//
// @param[in] device        the device reporting the error
// @param[in] device_fatal  NV_TRUE if the whole device requires reset,
//                          NV_FALSE if only the port does
// @param[in] link_id       link on which the fatal error occurred
//
void
nvswitch_set_fatal_error_lr10
(
    nvswitch_device *device,
    NvBool           device_fatal,
    NvU32            link_id
)
{
    NvU32 reg;

    NVSWITCH_ASSERT(link_id < nvswitch_get_num_links(device));

    // On first fatal error, notify PORT_DOWN
    if (!device->link[link_id].fatal_error_occurred)
    {
        if (nvswitch_lib_notify_client_events(device,
                    NVSWITCH_DEVICE_EVENT_PORT_DOWN) != NVL_SUCCESS)
        {
            // Notification failure is logged but does not block recording
            // the fatal state below.
            NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify PORT_DOWN event\n",
                         __FUNCTION__);
        }
    }

    device->link[link_id].fatal_error_occurred = NV_TRUE;

    if (device_fatal)
    {
        // Device-scope: set DEVICE_RESET_REQUIRED in the SAW scratch register.
        reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12);
        reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_RESET_REQUIRED,
                              1, reg);

        NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _SW_SCRATCH_12, reg);
    }
    else
    {
        // Port-scope: set PORT_RESET_REQUIRED in the NPORT warm scratch.
        reg = NVSWITCH_LINK_RD32_LR10(device, link_id, NPORT, _NPORT, _SCRATCH_WARM);
        reg = FLD_SET_DRF_NUM(_NPORT, _SCRATCH_WARM, _PORT_RESET_REQUIRED,
                              1, reg);

        NVSWITCH_LINK_WR32_LR10(device, link_id, NPORT, _NPORT, _SCRATCH_WARM, reg);
    }
}
6478 
6479 static NvU32
nvswitch_get_latency_sample_interval_msec_lr10(nvswitch_device * device)6480 nvswitch_get_latency_sample_interval_msec_lr10
6481 (
6482     nvswitch_device *device
6483 )
6484 {
6485     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
6486     return chip_device->latency_stats->sample_interval_msec;
6487 }
6488 
6489 NvU32
nvswitch_get_swap_clk_default_lr10(nvswitch_device * device)6490 nvswitch_get_swap_clk_default_lr10
6491 (
6492     nvswitch_device *device
6493 )
6494 {
6495     return -NVL_ERR_NOT_SUPPORTED;
6496 }
6497 
6498 NvBool
nvswitch_is_link_in_use_lr10(nvswitch_device * device,NvU32 link_id)6499 nvswitch_is_link_in_use_lr10
6500 (
6501     nvswitch_device *device,
6502     NvU32 link_id
6503 )
6504 {
6505     NvU32 data;
6506     nvlink_link *link;
6507 
6508     link = nvswitch_get_link(device, link_id);
6509     if (link == NULL)
6510     {
6511         // A query on an invalid link should never occur
6512         NVSWITCH_ASSERT(link != NULL);
6513         return NV_FALSE;
6514     }
6515 
6516     if (nvswitch_is_link_in_reset(device, link))
6517     {
6518         return NV_FALSE;
6519     }
6520 
6521     data = NVSWITCH_LINK_RD32_LR10(device, link_id,
6522                                    NVLDL, _NVLDL_TOP, _LINK_STATE);
6523 
6524     return (DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, data) !=
6525             NV_NVLDL_TOP_LINK_STATE_STATE_INIT);
6526 }
6527 
6528 static NvU32
nvswitch_get_device_dma_width_lr10(nvswitch_device * device)6529 nvswitch_get_device_dma_width_lr10
6530 (
6531     nvswitch_device *device
6532 )
6533 {
6534     return DMA_ADDR_WIDTH_LR10;
6535 }
6536 
6537 NvU32
nvswitch_get_link_ip_version_lr10(nvswitch_device * device,NvU32 link_id)6538 nvswitch_get_link_ip_version_lr10
6539 (
6540     nvswitch_device *device,
6541     NvU32            link_id
6542 )
6543 {
6544     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
6545     NvU32 nvldl_instance;
6546 
6547     nvldl_instance = NVSWITCH_GET_LINK_ENG_INST(device, link_id, NVLDL);
6548     if (NVSWITCH_ENG_IS_VALID(device, NVLDL, nvldl_instance))
6549     {
6550         return chip_device->engNVLDL[nvldl_instance].version;
6551     }
6552     else
6553     {
6554         NVSWITCH_PRINT(device, ERROR,
6555             "%s: NVLink[0x%x] NVLDL instance invalid\n",
6556             __FUNCTION__, link_id);
6557         return 0;
6558     }
6559 }
6560 
6561 static NvlStatus
nvswitch_test_soe_dma_lr10(nvswitch_device * device)6562 nvswitch_test_soe_dma_lr10
6563 (
6564     nvswitch_device *device
6565 )
6566 {
6567     return soeTestDma_HAL(device, (PSOE)device->pSoe);
6568 }
6569 
//
// Read the reserved 64-bit NVLTLC throughput counters for one link.
//
// @param[in]  device          the device to query
// @param[in]  link            link whose counters are read
// @param[in]  counter_mask    bitmask of NVSWITCH_THROUGHPUT_COUNTERS_TYPE_*
// @param[out] counter_values  indexed by counter bit position; only the
//                             requested entries are written
//
// @return NVL_SUCCESS, or -NVL_ERR_NOT_SUPPORTED for an unknown counter bit.
//
static NvlStatus
_nvswitch_get_reserved_throughput_counters
(
    nvswitch_device *device,
    nvlink_link     *link,
    NvU16           counter_mask,
    NvU64           *counter_values
)
{
    NvU16 counter = 0;

    //
    // LR10 to use counters 0 & 2 for monitoring
    // (Same as GPU behavior)
    // Counter 0 counts data flits
    // Counter 2 counts all flits
    //
    FOR_EACH_INDEX_IN_MASK(16, counter, counter_mask)
    {
        NvU32 counter_type = NVBIT(counter);
        NvU64 data = 0;

        switch (counter_type)
        {
            // Data flits transmitted: TX counter 0
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_TX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(0)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(0)));
                break;
            }
            // Data flits received: RX counter 0
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_RX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(0)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(0)));
                break;
            }
            // All flits transmitted: TX counter 2
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_RAW_TX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(2)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(2)));
                break;
            }
            // All flits received: RX counter 2
            case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_RAW_RX:
            {
                data = nvswitch_read_64bit_counter(device,
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(2)),
                           NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber,
                           NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(2)));
                break;
            }
            default:
            {
                return -NVL_ERR_NOT_SUPPORTED;
            }
        }
        counter_values[counter] = data;
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}
6641 
6642 NvlStatus
nvswitch_ctrl_get_throughput_counters_lr10(nvswitch_device * device,NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS * p)6643 nvswitch_ctrl_get_throughput_counters_lr10
6644 (
6645     nvswitch_device *device,
6646     NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *p
6647 )
6648 {
6649     NvlStatus status;
6650     nvlink_link *link;
6651     NvU16 i = 0;
6652 
6653     nvswitch_os_memset(p->counters, 0, sizeof(p->counters));
6654 
6655     FOR_EACH_INDEX_IN_MASK(64, i, p->linkMask)
6656     {
6657         link = nvswitch_get_link(device, i);
6658         if ((link == NULL) || (link->linkNumber >= NVSWITCH_MAX_PORTS) ||
6659             (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLTLC, link->linkNumber)))
6660         {
6661             continue;
6662         }
6663 
6664         status = _nvswitch_get_reserved_throughput_counters(device, link, p->counterMask,
6665                         p->counters[link->linkNumber].values);
6666         if (status != NVL_SUCCESS)
6667         {
6668             NVSWITCH_PRINT(device, ERROR,
6669                 "Failed to get reserved NVLINK throughput counters on link %d\n",
6670                 link->linkNumber);
6671             return status;
6672         }
6673     }
6674     FOR_EACH_INDEX_IN_MASK_END;
6675 
6676     return NVL_SUCCESS;
6677 }
6678 
6679 static NvBool
nvswitch_is_soe_supported_lr10(nvswitch_device * device)6680 nvswitch_is_soe_supported_lr10
6681 (
6682     nvswitch_device *device
6683 )
6684 {
6685     if (device->regkeys.soe_disable == NV_SWITCH_REGKEY_SOE_DISABLE_YES)
6686     {
6687         NVSWITCH_PRINT(device, INFO, "SOE is disabled via regkey.\n");
6688         return NV_FALSE;
6689     }
6690 
6691     return NV_TRUE;
6692 }
6693 
6694 NvBool
nvswitch_is_inforom_supported_lr10(nvswitch_device * device)6695 nvswitch_is_inforom_supported_lr10
6696 (
6697     nvswitch_device *device
6698 )
6699 {
6700     if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
6701     {
6702         NVSWITCH_PRINT(device, INFO,
6703             "INFOROM is not supported on non-silicon platform\n");
6704         return NV_FALSE;
6705     }
6706 
6707     if (!nvswitch_is_soe_supported(device))
6708     {
6709         NVSWITCH_PRINT(device, INFO,
6710             "INFOROM is not supported since SOE is not supported\n");
6711         return NV_FALSE;
6712     }
6713 
6714     return NV_TRUE;
6715 }
6716 
6717 NvBool
nvswitch_is_spi_supported_lr10(nvswitch_device * device)6718 nvswitch_is_spi_supported_lr10
6719 (
6720     nvswitch_device *device
6721 )
6722 {
6723     if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
6724     {
6725         NVSWITCH_PRINT(device, INFO,
6726             "SPI is not supported on non-silicon platforms\n");
6727         return NV_FALSE;
6728     }
6729 
6730     if (!nvswitch_is_soe_supported(device))
6731     {
6732         NVSWITCH_PRINT(device, INFO,
6733             "SPI is not supported since SOE is not supported\n");
6734         return NV_FALSE;
6735     }
6736 
6737     return NV_TRUE;
6738 }
6739 
6740 NvBool
nvswitch_is_bios_supported_lr10(nvswitch_device * device)6741 nvswitch_is_bios_supported_lr10
6742 (
6743     nvswitch_device *device
6744 )
6745 {
6746     return nvswitch_is_spi_supported(device);
6747 }
6748 
6749 NvlStatus
nvswitch_get_bios_size_lr10(nvswitch_device * device,NvU32 * pSize)6750 nvswitch_get_bios_size_lr10
6751 (
6752     nvswitch_device *device,
6753     NvU32 *pSize
6754 )
6755 {
6756     return nvswitch_bios_read_size(device, pSize);
6757 }
6758 
6759 NvBool
nvswitch_is_smbpbi_supported_lr10(nvswitch_device * device)6760 nvswitch_is_smbpbi_supported_lr10
6761 (
6762     nvswitch_device *device
6763 )
6764 {
6765     if (IS_RTLSIM(device) || IS_FMODEL(device))
6766     {
6767         NVSWITCH_PRINT(device, INFO,
6768             "SMBPBI is not supported on RTLSIM/FMODEL platforms\n");
6769         return NV_FALSE;
6770     }
6771 
6772     if (!nvswitch_is_soe_supported(device))
6773     {
6774         NVSWITCH_PRINT(device, INFO,
6775             "SMBPBI is not supported since SOE is not supported\n");
6776         return NV_FALSE;
6777     }
6778 
6779     return NV_TRUE;
6780 }
6781 
6782 /*
6783  * @Brief : Additional setup needed after device initialization
6784  *
6785  * @Description :
6786  *
6787  * @param[in] device        a reference to the device to initialize
6788  */
NvlStatus
nvswitch_post_init_device_setup_lr10
(
    nvswitch_device *device
)
{
    NvlStatus retval;

    // SOE DMA self-test: skipped when disabled by regkey, on simulation
    // platforms, or when SOE itself is unsupported; otherwise a failure
    // aborts post-init.
    if (device->regkeys.soe_dma_self_test ==
            NV_SWITCH_REGKEY_SOE_DMA_SELFTEST_DISABLE)
    {
        NVSWITCH_PRINT(device, INFO,
            "Skipping SOE DMA selftest as requested using regkey\n");
    }
    else if (IS_RTLSIM(device) || IS_FMODEL(device))
    {
        NVSWITCH_PRINT(device, SETUP,
            "Skipping DMA selftest on FMODEL/RTLSIM platforms\n");
    }
    else if (!nvswitch_is_soe_supported(device))
    {
        NVSWITCH_PRINT(device, SETUP,
            "Skipping DMA selftest since SOE is not supported\n");
    }
    else
    {
        retval = nvswitch_test_soe_dma_lr10(device);
        if (retval != NVL_SUCCESS)
        {
            return retval;
        }
    }

    // INFOROM post-init is best-effort; its status is not propagated.
    if (nvswitch_is_inforom_supported(device))
    {
        nvswitch_inforom_post_init(device);
    }
    else
    {
        NVSWITCH_PRINT(device, SETUP, "Skipping INFOROM init\n");
    }

    // Initialize SOE L2 state tracking.
    nvswitch_soe_init_l2_state(device);

    return NVL_SUCCESS;
}
6835 
6836 /*
6837  * @Brief : Additional setup needed after blacklisted device initialization
6838  *
6839  * @Description :
6840  *
6841  * @param[in] device        a reference to the device to initialize
6842  */
void
nvswitch_post_init_blacklist_device_setup_lr10
(
    nvswitch_device *device
)
{
    NvlStatus status;

    // Even blacklisted devices run INFOROM post-init when supported.
    if (nvswitch_is_inforom_supported(device))
    {
        nvswitch_inforom_post_init(device);
    }

    //
    // Initialize the driver state monitoring callback.
    // This is still needed for SOE to report correct driver state.
    //
    status = nvswitch_smbpbi_post_init(device);
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR, "Smbpbi post init failed, rc:%d\n",
                       status);
        return;
    }

    //
    // This internally will only flush if OMS value has changed
    //
    status = device->hal.nvswitch_oms_inforom_flush(device);
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR, "Flushing OMS failed, rc:%d\n",
                       status);
        return;
    }
}
6879 
6880 void
nvswitch_load_uuid_lr10(nvswitch_device * device)6881 nvswitch_load_uuid_lr10
6882 (
6883     nvswitch_device *device
6884 )
6885 {
6886     NvU32 regData[4];
6887 
6888     //
6889     // Read 128-bit UUID from secure scratch registers which must be
6890     // populated by firmware.
6891     //
6892     regData[0] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_8);
6893     regData[1] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_9);
6894     regData[2] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_10);
6895     regData[3] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_11);
6896 
6897     nvswitch_os_memcpy(&device->uuid.uuid, (NvU8 *)regData, NV_UUID_LEN);
6898 }
6899 
//
// Read the out-of-band (OOB) blacklist request from the cold scratch
// register and update device fabric state accordingly.
//
// @param[in] device  the device to query
//
// @return NVL_SUCCESS, or -NVL_BAD_ARGS for a NULL device.
//
NvlStatus
nvswitch_read_oob_blacklist_state_lr10
(
    nvswitch_device *device
)
{
    NvU32 reg;
    NvBool is_oob_blacklist;
    NvlStatus status;

    if (device == NULL)
    {
        // NOTE(review): NVSWITCH_PRINT is invoked with device == NULL here;
        // presumably the macro tolerates a NULL device — confirm.
        NVSWITCH_PRINT(device, ERROR, "%s: Called with invalid argument\n", __FUNCTION__);
        return -NVL_BAD_ARGS;
    }

    reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SCRATCH_COLD);

    // Check for uninitialized SCRATCH_COLD before declaring the device blacklisted
    if (reg == NV_NVLSAW_SCRATCH_COLD_DATA_INIT)
        is_oob_blacklist = NV_FALSE;
    else
        is_oob_blacklist = DRF_VAL(_NVLSAW, _SCRATCH_COLD, _OOB_BLACKLIST_DEVICE_REQUESTED, reg);

    // Mirror the request into INFOROM OMS; failure is logged but not fatal.
    status = nvswitch_inforom_oms_set_device_disable(device, is_oob_blacklist);
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Failed to set device disable to %d, rc:%d\n",
            is_oob_blacklist, status);
    }

    if (is_oob_blacklist)
    {
        device->device_fabric_state = NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED;
        device->device_blacklist_reason = NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_OUT_OF_BAND;
    }

    return NVL_SUCCESS;
}
6940 
6941 NvlStatus
nvswitch_write_fabric_state_lr10(nvswitch_device * device)6942 nvswitch_write_fabric_state_lr10
6943 (
6944     nvswitch_device *device
6945 )
6946 {
6947     NvU32 reg;
6948 
6949     if (device == NULL)
6950     {
6951         NVSWITCH_PRINT(device, ERROR, "%s: Called with invalid argument\n", __FUNCTION__);
6952         return -NVL_BAD_ARGS;
6953     }
6954 
6955     // bump the sequence number for each write
6956     device->fabric_state_sequence_number++;
6957 
6958     reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12);
6959 
6960     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_BLACKLIST_REASON,
6961                           device->device_blacklist_reason, reg);
6962     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_FABRIC_STATE,
6963                           device->device_fabric_state, reg);
6964     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DRIVER_FABRIC_STATE,
6965                           device->driver_fabric_state, reg);
6966     reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _EVENT_MESSAGE_COUNT,
6967                           device->fabric_state_sequence_number, reg);
6968 
6969     NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _SW_SCRATCH_12, reg);
6970 
6971     return NVL_SUCCESS;
6972 }
6973 
6974 static NVSWITCH_ENGINE_DESCRIPTOR_TYPE *
_nvswitch_get_eng_descriptor_lr10(nvswitch_device * device,NVSWITCH_ENGINE_ID eng_id)6975 _nvswitch_get_eng_descriptor_lr10
6976 (
6977     nvswitch_device *device,
6978     NVSWITCH_ENGINE_ID eng_id
6979 )
6980 {
6981     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
6982     NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine = NULL;
6983 
6984     if (eng_id >= NVSWITCH_ENGINE_ID_SIZE)
6985     {
6986         NVSWITCH_PRINT(device, ERROR,
6987             "%s: Engine_ID 0x%x out of range 0..0x%x\n",
6988             __FUNCTION__,
6989             eng_id, NVSWITCH_ENGINE_ID_SIZE-1);
6990         return NULL;
6991     }
6992 
6993     engine = &(chip_device->io.common[eng_id]);
6994     NVSWITCH_ASSERT(eng_id == engine->eng_id);
6995 
6996     return engine;
6997 }
6998 
//
// Resolve the register base address for an engine instance in the requested
// address space (unicast, broadcast, or multicast).
//
// @param[in] device        the device to query
// @param[in] eng_id        engine identifier
// @param[in] eng_bcast     NVSWITCH_GET_ENG_DESC_TYPE_UNICAST/BCAST/MULTICAST
// @param[in] eng_instance  instance index (unicast/multicast only)
//
// @return the base address, or NVSWITCH_BASE_ADDR_INVALID on any failure
//         (unknown engine, bad address-space type, or out-of-range instance).
//
NvU32
nvswitch_get_eng_base_lr10
(
    nvswitch_device *device,
    NVSWITCH_ENGINE_ID eng_id,
    NvU32 eng_bcast,
    NvU32 eng_instance
)
{
    NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine;
    NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID;

    engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
    if (engine == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: ID 0x%x[%d] %s not found\n",
            __FUNCTION__,
            eng_id, eng_instance,
            (
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
                "??"
            ));
        return NVSWITCH_BASE_ADDR_INVALID;
    }

    // Select the address table matching the requested address-space type;
    // unicast and multicast additionally bounds-check the instance index.
    if ((eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) &&
        (eng_instance < engine->eng_count))
    {
        base_addr = engine->uc_addr[eng_instance];
    }
    else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST)
    {
        base_addr = engine->bc_addr;
    }
    else if ((eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) &&
        (eng_instance < engine->mc_addr_count))
    {
        base_addr = engine->mc_addr[eng_instance];
    }
    else
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Unknown address space type 0x%x (not UC, BC, or MC)\n",
            __FUNCTION__,
            eng_bcast);
    }

    // A descriptor may legitimately contain an invalid entry; report it.
    if (base_addr == NVSWITCH_BASE_ADDR_INVALID)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: ID 0x%x[%d] %s invalid address\n",
            __FUNCTION__,
            eng_id, eng_instance,
            (
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
                "??"
            ));
    }

    return base_addr;
}
7065 
7066 NvU32
nvswitch_get_eng_count_lr10(nvswitch_device * device,NVSWITCH_ENGINE_ID eng_id,NvU32 eng_bcast)7067 nvswitch_get_eng_count_lr10
7068 (
7069     nvswitch_device *device,
7070     NVSWITCH_ENGINE_ID eng_id,
7071     NvU32 eng_bcast
7072 )
7073 {
7074     NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine;
7075     NvU32 eng_count = 0;
7076 
7077     engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
7078     if (engine == NULL)
7079     {
7080         NVSWITCH_PRINT(device, ERROR,
7081             "%s: ID 0x%x %s not found\n",
7082             __FUNCTION__,
7083             eng_id,
7084             (
7085                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7086                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7087                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7088                 "??"
7089             ));
7090         return 0;
7091     }
7092 
7093     if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST)
7094     {
7095         eng_count = engine->eng_count;
7096     }
7097     else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST)
7098     {
7099         if (engine->bc_addr == NVSWITCH_BASE_ADDR_INVALID)
7100         {
7101             eng_count = 0;
7102         }
7103         else
7104         {
7105             eng_count = 1;
7106         }
7107     }
7108     else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST)
7109     {
7110         eng_count = engine->mc_addr_count;
7111     }
7112     else
7113     {
7114         NVSWITCH_PRINT(device, ERROR,
7115             "%s: Unknown address space type 0x%x (not UC, BC, or MC)\n",
7116             __FUNCTION__,
7117             eng_bcast);
7118     }
7119 
7120     return eng_count;
7121 }
7122 
7123 NvU32
nvswitch_eng_rd_lr10(nvswitch_device * device,NVSWITCH_ENGINE_ID eng_id,NvU32 eng_bcast,NvU32 eng_instance,NvU32 offset)7124 nvswitch_eng_rd_lr10
7125 (
7126     nvswitch_device *device,
7127     NVSWITCH_ENGINE_ID eng_id,
7128     NvU32 eng_bcast,
7129     NvU32 eng_instance,
7130     NvU32 offset
7131 )
7132 {
7133     NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID;
7134     NvU32 data;
7135 
7136     base_addr = nvswitch_get_eng_base_lr10(device, eng_id, eng_bcast, eng_instance);
7137     if (base_addr == NVSWITCH_BASE_ADDR_INVALID)
7138     {
7139         NVSWITCH_PRINT(device, ERROR,
7140             "%s: ID 0x%x[%d] %s invalid address\n",
7141             __FUNCTION__,
7142             eng_id, eng_instance,
7143             (
7144                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7145                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7146                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7147                 "??"
7148             ));
7149         NVSWITCH_ASSERT(base_addr != NVSWITCH_BASE_ADDR_INVALID);
7150         return 0xBADFBADF;
7151     }
7152 
7153     data = nvswitch_reg_read_32(device, base_addr + offset);
7154 
7155 #if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7156     {
7157         NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
7158 
7159         NVSWITCH_PRINT(device, MMIO,
7160             "%s: ENG_RD %s(0x%x)[%d] @0x%08x+0x%06x = 0x%08x\n",
7161             __FUNCTION__,
7162             engine->eng_name, engine->eng_id,
7163             eng_instance,
7164             base_addr, offset,
7165             data);
7166     }
7167 #endif  //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7168 
7169     return data;
7170 }
7171 
7172 void
nvswitch_eng_wr_lr10(nvswitch_device * device,NVSWITCH_ENGINE_ID eng_id,NvU32 eng_bcast,NvU32 eng_instance,NvU32 offset,NvU32 data)7173 nvswitch_eng_wr_lr10
7174 (
7175     nvswitch_device *device,
7176     NVSWITCH_ENGINE_ID eng_id,
7177     NvU32 eng_bcast,
7178     NvU32 eng_instance,
7179     NvU32 offset,
7180     NvU32 data
7181 )
7182 {
7183     NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID;
7184 
7185     base_addr = nvswitch_get_eng_base_lr10(device, eng_id, eng_bcast, eng_instance);
7186     if (base_addr == NVSWITCH_BASE_ADDR_INVALID)
7187     {
7188         NVSWITCH_PRINT(device, ERROR,
7189             "%s: ID 0x%x[%d] %s invalid address\n",
7190             __FUNCTION__,
7191             eng_id, eng_instance,
7192             (
7193                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
7194                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
7195                 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
7196                 "??"
7197             ));
7198         NVSWITCH_ASSERT(base_addr != NVSWITCH_BASE_ADDR_INVALID);
7199         return;
7200     }
7201 
7202     nvswitch_reg_write_32(device, base_addr + offset,  data);
7203 
7204 #if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7205     {
7206         NVSWITCH_ENGINE_DESCRIPTOR_TYPE  *engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id);
7207 
7208         NVSWITCH_PRINT(device, MMIO,
7209             "%s: ENG_WR %s(0x%x)[%d] @0x%08x+0x%06x = 0x%08x\n",
7210             __FUNCTION__,
7211             engine->eng_name, engine->eng_id,
7212             eng_instance,
7213             base_addr, offset,
7214             data);
7215     }
7216 #endif  //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
7217 }
7218 
7219 NvU32
nvswitch_get_link_eng_inst_lr10(nvswitch_device * device,NvU32 link_id,NVSWITCH_ENGINE_ID eng_id)7220 nvswitch_get_link_eng_inst_lr10
7221 (
7222     nvswitch_device *device,
7223     NvU32 link_id,
7224     NVSWITCH_ENGINE_ID eng_id
7225 )
7226 {
7227     NvU32   eng_instance = NVSWITCH_ENGINE_INSTANCE_INVALID;
7228 
7229     if (link_id >= NVSWITCH_LINK_COUNT(device))
7230     {
7231         NVSWITCH_PRINT(device, ERROR,
7232             "%s: link ID 0x%x out-of-range [0x0..0x%x]\n",
7233             __FUNCTION__,
7234             link_id, NVSWITCH_LINK_COUNT(device)-1);
7235         return NVSWITCH_ENGINE_INSTANCE_INVALID;
7236     }
7237 
7238     switch (eng_id)
7239     {
7240         case NVSWITCH_ENGINE_ID_NPG:
7241             eng_instance = link_id / NVSWITCH_LINKS_PER_NPG;
7242             break;
7243         case NVSWITCH_ENGINE_ID_NVLIPT:
7244             eng_instance = link_id / NVSWITCH_LINKS_PER_NVLIPT;
7245             break;
7246         case NVSWITCH_ENGINE_ID_NVLW:
7247         case NVSWITCH_ENGINE_ID_NVLW_PERFMON:
7248             eng_instance = link_id / NVSWITCH_LINKS_PER_NVLW;
7249             break;
7250         case NVSWITCH_ENGINE_ID_MINION:
7251             eng_instance = link_id / NVSWITCH_LINKS_PER_MINION;
7252             break;
7253         case NVSWITCH_ENGINE_ID_NPORT:
7254         case NVSWITCH_ENGINE_ID_NVLTLC:
7255         case NVSWITCH_ENGINE_ID_NVLDL:
7256         case NVSWITCH_ENGINE_ID_NVLIPT_LNK:
7257         case NVSWITCH_ENGINE_ID_NPORT_PERFMON:
7258             eng_instance = link_id;
7259             break;
7260         default:
7261             NVSWITCH_PRINT(device, ERROR,
7262                 "%s: link ID 0x%x has no association with EngID 0x%x\n",
7263                 __FUNCTION__,
7264                 link_id, eng_id);
7265             eng_instance = NVSWITCH_ENGINE_INSTANCE_INVALID;
7266             break;
7267     }
7268 
7269     return eng_instance;
7270 }
7271 
//
// Report the NVLink version exposed through the capabilities query
// (NVLink 3.0 on LR10).
//
NvU32
nvswitch_get_caps_nvlink_version_lr10
(
    nvswitch_device *device
)
{
    // The STATUS and CAPS version defines are used interchangeably by
    // callers; the compile-time assert keeps them in lockstep.
    ct_assert(NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_3_0 ==
                NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0);
    return NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0;
}
7282 
7283 NVSWITCH_BIOS_NVLINK_CONFIG *
nvswitch_get_bios_nvlink_config_lr10(nvswitch_device * device)7284 nvswitch_get_bios_nvlink_config_lr10
7285 (
7286     nvswitch_device *device
7287 )
7288 {
7289     lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
7290 
7291     return (chip_device != NULL) ? &chip_device->bios_config : NULL;
7292 }
7293 
7294 /*
7295  * CTRL_NVSWITCH_SET_RESIDENCY_BINS
7296  */
static NvlStatus
nvswitch_ctrl_set_residency_bins_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_RESIDENCY_BINS *p
)
{
    // Residency bins are not implemented on LR10; reject the control call.
    NVSWITCH_PRINT(device, ERROR,
        "SET_RESIDENCY_BINS should not be called on LR10\n");
    return -NVL_ERR_NOT_SUPPORTED;
}
7308 
7309 /*
7310  * CTRL_NVSWITCH_GET_RESIDENCY_BINS
7311  */
static NvlStatus
nvswitch_ctrl_get_residency_bins_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_RESIDENCY_BINS *p
)
{
    // Residency bins are not implemented on LR10; reject the control call.
    NVSWITCH_PRINT(device, ERROR,
        "GET_RESIDENCY_BINS should not be called on LR10\n");
    return -NVL_ERR_NOT_SUPPORTED;
}
7323 
7324 /*
7325  * CTRL_NVSWITCH_GET_RB_STALL_BUSY
7326  */
static NvlStatus
nvswitch_ctrl_get_rb_stall_busy_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_RB_STALL_BUSY *p
)
{
    // Stall/busy counters are not implemented on LR10; reject the call.
    NVSWITCH_PRINT(device, ERROR,
        "GET_RB_STALL_BUSY should not be called on LR10\n");
    return -NVL_ERR_NOT_SUPPORTED;
}
7338 
7339 /*
7340  * CTRL_NVSWITCH_GET_MULTICAST_ID_ERROR_VECTOR
7341  */
static NvlStatus
nvswitch_ctrl_get_multicast_id_error_vector_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_MULTICAST_ID_ERROR_VECTOR *p
)
{
    // Multicast is not supported on LR10; reject the control call.
    NVSWITCH_PRINT(device, ERROR,
        "GET_MULTICAST_ID_ERROR_VECTOR should not be called on LR10\n");
    return -NVL_ERR_NOT_SUPPORTED;
}
7353 
7354 /*
7355  * CTRL_NVSWITCH_CLEAR_MULTICAST_ID_ERROR_VECTOR
7356  */
static NvlStatus
nvswitch_ctrl_clear_multicast_id_error_vector_lr10
(
    nvswitch_device *device,
    NVSWITCH_CLEAR_MULTICAST_ID_ERROR_VECTOR *p
)
{
    // Multicast is not supported on LR10; reject the control call.
    NVSWITCH_PRINT(device, ERROR,
        "CLEAR_MULTICAST_ID_ERROR_VECTOR should not be called on LR10\n");
    return -NVL_ERR_NOT_SUPPORTED;
}
7368 
//
// Inband messaging is not supported on LR10, so NACKs are never sent.
//
void
nvswitch_send_inband_nack_lr10
(
    nvswitch_device *device,
    NvU32 *msghdr,
    NvU32  linkId
)
{
    return;
}
7379 
//
// No inband message persistence exists on LR10; the limit is always 0.
//
NvU32
nvswitch_get_max_persistent_message_count_lr10
(
    nvswitch_device *device
)
{
    return 0;
}
7388 
7389 /*
7390  * CTRL_NVSWITCH_INBAND_SEND_DATA
7391  */
NvlStatus
nvswitch_ctrl_inband_send_data_lr10
(
    nvswitch_device *device,
    NVSWITCH_INBAND_SEND_DATA_PARAMS *p
)
{
    // Inband messaging is not supported on LR10.
    return -NVL_ERR_NOT_SUPPORTED;
}
7401 
7402 /*
7403  * CTRL_NVSWITCH_INBAND_RECEIVE_DATA
7404  */
NvlStatus
nvswitch_ctrl_inband_read_data_lr10
(
    nvswitch_device *device,
    NVSWITCH_INBAND_READ_DATA_PARAMS *p
)
{
    // Inband messaging is not supported on LR10.
    return -NVL_ERR_NOT_SUPPORTED;
}
7414 
7415 /*
7416  * CTRL_NVSWITCH_GET_BOARD_PART_NUMBER
7417  */
7418 NvlStatus
nvswitch_ctrl_get_board_part_number_lr10(nvswitch_device * device,NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR * p)7419 nvswitch_ctrl_get_board_part_number_lr10
7420 (
7421     nvswitch_device *device,
7422     NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p
7423 )
7424 {
7425     struct inforom *pInforom = device->pInforom;
7426     INFOROM_OBD_OBJECT_V1_XX *pOBDObj;
7427     int byteIdx;
7428 
7429     if (pInforom == NULL)
7430     {
7431         return -NVL_ERR_NOT_SUPPORTED;
7432     }
7433 
7434     if (!pInforom->OBD.bValid)
7435     {
7436         NVSWITCH_PRINT(device, ERROR, "OBD data is not available\n");
7437         return -NVL_ERR_GENERIC;
7438     }
7439 
7440     pOBDObj = &pInforom->OBD.object.v1;
7441 
7442     if (sizeof(p->data) != sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008))
7443     {
7444         NVSWITCH_PRINT(device, ERROR,
7445                        "board part number available size %lu is not same as the request size %lu\n",
7446                        sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008), sizeof(p->data));
7447         return -NVL_ERR_GENERIC;
7448     }
7449 
7450     nvswitch_os_memset(p, 0, sizeof(NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR));
7451 
7452     /* Copy board type data */
7453     for (byteIdx = 0; byteIdx < NVSWITCH_BOARD_PART_NUMBER_SIZE_IN_BYTES; byteIdx++)
7454     {
7455         p->data[byteIdx] =(NvU8)(pOBDObj->productPartNumber[byteIdx] & 0xFF);
7456     }
7457 
7458     return NVL_SUCCESS;
7459 }
7460 
7461 /*
7462 * @brief: This function retrieves the NVLIPT public ID for a given global link idx
7463 * @params[in]  device        reference to current nvswitch device
7464 * @params[in]  linkId        link to retrieve NVLIPT public ID from
7465 * @params[out] publicId      Public ID of NVLIPT owning linkId
7466 */
nvswitch_get_link_public_id_lr10(nvswitch_device * device,NvU32 linkId,NvU32 * publicId)7467 NvlStatus nvswitch_get_link_public_id_lr10
7468 (
7469     nvswitch_device *device,
7470     NvU32 linkId,
7471     NvU32 *publicId
7472 )
7473 {
7474     if (!device->hal.nvswitch_is_link_valid(device, linkId) ||
7475         (publicId == NULL))
7476     {
7477         return -NVL_BAD_ARGS;
7478     }
7479 
7480     *publicId = NVSWITCH_NVLIPT_GET_PUBLIC_ID_LR10(linkId);
7481 
7482 
7483     return (NVSWITCH_ENG_VALID_LR10(device, NVLIPT, *publicId)) ?
7484                 NVL_SUCCESS : -NVL_BAD_ARGS;
7485 }
7486 
7487 /*
7488 * @brief: This function retrieves the internal link idx for a given global link idx
7489 * @params[in]  device        reference to current nvswitch device
* @params[in]  linkId        link to retrieve the internal link index for
7491 * @params[out] localLinkIdx  Internal link index of linkId
7492 */
nvswitch_get_link_local_idx_lr10(nvswitch_device * device,NvU32 linkId,NvU32 * localLinkIdx)7493 NvlStatus nvswitch_get_link_local_idx_lr10
7494 (
7495     nvswitch_device *device,
7496     NvU32 linkId,
7497     NvU32 *localLinkIdx
7498 )
7499 {
7500     if (!device->hal.nvswitch_is_link_valid(device, linkId) ||
7501         (localLinkIdx == NULL))
7502     {
7503         return -NVL_BAD_ARGS;
7504     }
7505 
7506     *localLinkIdx = NVSWITCH_NVLIPT_GET_LOCAL_LINK_ID_LR10(linkId);
7507 
7508     return NVL_SUCCESS;
7509 }
7510 
nvswitch_set_training_error_info_lr10(nvswitch_device * device,NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS * pLinkTrainingErrorInfoParams)7511 NvlStatus nvswitch_set_training_error_info_lr10
7512 (
7513     nvswitch_device *device,
7514     NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS *pLinkTrainingErrorInfoParams
7515 )
7516 {
7517     NVSWITCH_LINK_TRAINING_ERROR_INFO linkTrainingErrorInfo;
7518     NVSWITCH_LINK_RUNTIME_ERROR_INFO linkRuntimeErrorInfo;
7519 
7520     linkTrainingErrorInfo.isValid = NV_TRUE;
7521     linkTrainingErrorInfo.attemptedTrainingMask0 =
7522         pLinkTrainingErrorInfoParams->attemptedTrainingMask0;
7523     linkTrainingErrorInfo.trainingErrorMask0 =
7524         pLinkTrainingErrorInfoParams->trainingErrorMask0;
7525 
7526     linkRuntimeErrorInfo.isValid = NV_FALSE;
7527     linkRuntimeErrorInfo.mask0   = 0;
7528 
7529     return nvswitch_smbpbi_set_link_error_info(device,
7530                                                &linkTrainingErrorInfo,
7531                                                &linkRuntimeErrorInfo);
7532 }
7533 
nvswitch_ctrl_get_fatal_error_scope_lr10(nvswitch_device * device,NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS * pParams)7534 NvlStatus nvswitch_ctrl_get_fatal_error_scope_lr10
7535 (
7536     nvswitch_device *device,
7537     NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS *pParams
7538 )
7539 {
7540     NvU32 linkId;
7541     NvU32 reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12);
7542     pParams->device = FLD_TEST_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_RESET_REQUIRED,
7543                                        1, reg);
7544 
7545     for (linkId = 0; linkId < NVSWITCH_MAX_PORTS; linkId++)
7546     {
7547         if (!nvswitch_is_link_valid(device, linkId))
7548         {
7549             pParams->port[linkId] = NV_FALSE;
7550             continue;
7551         }
7552 
7553         reg = NVSWITCH_LINK_RD32_LR10(device, linkId, NPORT, _NPORT, _SCRATCH_WARM);
7554         pParams->port[linkId] = FLD_TEST_DRF_NUM(_NPORT, _SCRATCH_WARM,
7555                                                  _PORT_RESET_REQUIRED, 1, reg);
7556     }
7557 
7558     return NVL_SUCCESS;
7559 }
7560 
NvlStatus nvswitch_ctrl_set_mc_rid_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_MC_RID_TABLE_PARAMS *p
)
{
    // Multicast RID tables are not supported on LR10.
    return -NVL_ERR_NOT_SUPPORTED;
}
7569 
NvlStatus nvswitch_ctrl_get_mc_rid_table_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_MC_RID_TABLE_PARAMS *p
)
{
    // Multicast RID tables are not supported on LR10.
    return -NVL_ERR_NOT_SUPPORTED;
}
7578 
//
// Initialize each valid link's NPORT warm scratch register: registers
// still holding the hardware init pattern are cleared to 0 so later
// reads see a well-defined value.
//
void nvswitch_init_scratch_lr10
(
    nvswitch_device *device
)
{
    NvU32 link;
    NvU32 warm;

    for (link = 0; link < nvswitch_get_num_links(device); link++)
    {
        if (nvswitch_is_link_valid(device, link))
        {
            warm = NVSWITCH_LINK_RD32(device, link, NPORT, _NPORT, _SCRATCH_WARM);
            if (warm == NV_NPORT_SCRATCH_WARM_DATA_INIT)
            {
                NVSWITCH_LINK_WR32(device, link, NPORT, _NPORT, _SCRATCH_WARM, 0);
            }
        }
    }
}
7601 
NvlStatus
nvswitch_launch_ALI_lr10
(
    nvswitch_device *device
)
{
    // ALI link training is not supported on LR10.
    return -NVL_ERR_NOT_SUPPORTED;
}
7610 
NvlStatus
nvswitch_set_training_mode_lr10
(
    nvswitch_device *device
)
{
    // No alternate training mode selection exists on LR10; nothing to do.
    return NVL_SUCCESS;
}
7619 
7620 NvlStatus
nvswitch_parse_bios_image_lr10(nvswitch_device * device)7621 nvswitch_parse_bios_image_lr10
7622 (
7623     nvswitch_device *device
7624 )
7625 {
7626     NVSWITCH_BIOS_NVLINK_CONFIG *bios_config;
7627     NV_STATUS status = NV_OK;
7628 
7629     // check if spi is supported
7630     if (!nvswitch_is_bios_supported(device))
7631     {
7632         NVSWITCH_PRINT(device, ERROR,
7633                 "%s: BIOS is not supported\n",
7634                 __FUNCTION__);
7635         return -NVL_ERR_NOT_SUPPORTED;
7636     }
7637 
7638     bios_config = nvswitch_get_bios_nvlink_config(device);
7639 
7640     // Parse and retrieve the VBIOS info
7641     status = _nvswitch_setup_link_vbios_overrides(device, bios_config);
7642     if ((status != NV_OK) && device->pSoe)
7643     {
7644         //To enable LS10 bringup (VBIOS is not ready and SOE is disabled), fail the device init only when SOE is enabled and vbios overrides has failed
7645         NVSWITCH_PRINT(device, ERROR,
7646                 "%s: error=0x%x\n",
7647                 __FUNCTION__, status);
7648 
7649         return -NVL_ERR_GENERIC;
7650     }
7651 
7652     return NVL_SUCCESS;
7653 }
7654 
NvlStatus
nvswitch_ctrl_get_nvlink_lp_counters_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS *params
)
{
    // Low-power counters are not supported on LR10.
    return -NVL_ERR_NOT_SUPPORTED;
}
7664 
7665 NvlStatus
nvswitch_ctrl_get_sw_info_lr10(nvswitch_device * device,NVSWITCH_GET_SW_INFO_PARAMS * p)7666 nvswitch_ctrl_get_sw_info_lr10
7667 (
7668     nvswitch_device *device,
7669     NVSWITCH_GET_SW_INFO_PARAMS *p
7670 )
7671 {
7672     NvlStatus retval = NVL_SUCCESS;
7673     NvU32 i;
7674 
7675     if (p->count > NVSWITCH_GET_SW_INFO_COUNT_MAX)
7676     {
7677         NVSWITCH_PRINT(device, ERROR,
7678             "%s: Invalid args\n",
7679             __FUNCTION__);
7680         return -NVL_BAD_ARGS;
7681     }
7682 
7683     nvswitch_os_memset(p->info, 0, sizeof(NvU32)*NVSWITCH_GET_SW_INFO_COUNT_MAX);
7684 
7685     for (i = 0; i < p->count; i++)
7686     {
7687         switch (p->index[i])
7688         {
7689             case NVSWITCH_GET_SW_INFO_INDEX_INFOROM_NVL_SUPPORTED:
7690                 p->info[i] = NV_TRUE;
7691                 break;
7692             case NVSWITCH_GET_SW_INFO_INDEX_INFOROM_BBX_SUPPORTED:
7693                 p->info[i] = (NvU32)_nvswitch_inforom_bbx_supported(device);
7694                 break;
7695             default:
7696                 NVSWITCH_PRINT(device, ERROR,
7697                     "%s: Undefined NVSWITCH_GET_SW_INFO_INDEX 0x%x\n",
7698                     __FUNCTION__,
7699                     p->index[i]);
7700                 retval = -NVL_BAD_ARGS;
7701                 break;
7702         }
7703     }
7704 
7705     return retval;
7706 }
7707 
7708 NvlStatus
nvswitch_ctrl_get_err_info_lr10(nvswitch_device * device,NVSWITCH_NVLINK_GET_ERR_INFO_PARAMS * ret)7709 nvswitch_ctrl_get_err_info_lr10
7710 (
7711     nvswitch_device *device,
7712     NVSWITCH_NVLINK_GET_ERR_INFO_PARAMS *ret
7713 )
7714 {
7715     nvlink_link *link;
7716     NvU32 data;
7717     NvU8 i;
7718 
7719      ret->linkMask = nvswitch_get_enabled_link_mask(device);
7720 
7721     FOR_EACH_INDEX_IN_MASK(64, i, ret->linkMask)
7722     {
7723         link = nvswitch_get_link(device, i);
7724 
7725         if ((link == NULL) ||
7726             !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) ||
7727             (i >= NVSWITCH_NVLINK_MAX_LINKS))
7728         {
7729             continue;
7730         }
7731 
7732         // TODO NVidia TL not supported
7733         NVSWITCH_PRINT(device, NOISY,
7734             "%s WARNING: Nvidia %s register %s does not exist!\n",
7735             __FUNCTION__, "NVLTL", "NV_NVLTL_TL_ERRLOG_REG");
7736 
7737         NVSWITCH_PRINT(device, NOISY,
7738             "%s WARNING: Nvidia %s register %s does not exist!\n",
7739             __FUNCTION__, "NVLTL", "NV_NVLTL_TL_INTEN_REG");
7740 
7741         ret->linkErrInfo[i].TLErrlog = 0x0;
7742         ret->linkErrInfo[i].TLIntrEn = 0x0;
7743 
7744         data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _SLSM_STATUS_TX);
7745         ret->linkErrInfo[i].DLSpeedStatusTx =
7746             DRF_VAL(_NVLDL_TX, _SLSM_STATUS_TX, _PRIMARY_STATE, data);
7747 
7748         data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _SLSM_STATUS_RX);
7749         ret->linkErrInfo[i].DLSpeedStatusRx =
7750             DRF_VAL(_NVLDL_RX, _SLSM_STATUS_RX, _PRIMARY_STATE, data);
7751 
7752         data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _INTR);
7753         ret->linkErrInfo[i].bExcessErrorDL =
7754             !!DRF_VAL(_NVLDL_TOP, _INTR, _RX_SHORT_ERROR_RATE, data);
7755 
7756         if (ret->linkErrInfo[i].bExcessErrorDL)
7757         {
7758             NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _INTR,
7759                 DRF_NUM(_NVLDL_TOP, _INTR, _RX_SHORT_ERROR_RATE, 0x1));
7760         }
7761     }
7762     FOR_EACH_INDEX_IN_MASK_END;
7763 
7764     return NVL_SUCCESS;
7765 }
7766 
7767 static NvlStatus
nvswitch_ctrl_clear_counters_lr10(nvswitch_device * device,NVSWITCH_NVLINK_CLEAR_COUNTERS_PARAMS * ret)7768 nvswitch_ctrl_clear_counters_lr10
7769 (
7770     nvswitch_device *device,
7771     NVSWITCH_NVLINK_CLEAR_COUNTERS_PARAMS *ret
7772 )
7773 {
7774     nvlink_link *link;
7775     NvU8 i;
7776     NvU32 counterMask;
7777     NvlStatus status = NVL_SUCCESS;
7778 
7779     counterMask = ret->counterMask;
7780 
7781     // Common usage allows one of these to stand for all of them
7782     if ((counterMask) & ( NVSWITCH_NVLINK_COUNTER_TL_TX0
7783                         | NVSWITCH_NVLINK_COUNTER_TL_TX1
7784                         | NVSWITCH_NVLINK_COUNTER_TL_RX0
7785                         | NVSWITCH_NVLINK_COUNTER_TL_RX1
7786                         ))
7787     {
7788         counterMask |= ( NVSWITCH_NVLINK_COUNTER_TL_TX0
7789                        | NVSWITCH_NVLINK_COUNTER_TL_TX1
7790                        | NVSWITCH_NVLINK_COUNTER_TL_RX0
7791                        | NVSWITCH_NVLINK_COUNTER_TL_RX1
7792                        );
7793     }
7794 
7795     // Common usage allows one of these to stand for all of them
7796     if ((counterMask) & ( NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT
7797                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0
7798                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1
7799                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2
7800                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3
7801                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4
7802                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5
7803                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6
7804                         | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7
7805                         | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY
7806                         | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY
7807                         ))
7808     {
7809         counterMask |= ( NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT
7810                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0
7811                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1
7812                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2
7813                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3
7814                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4
7815                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5
7816                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6
7817                        | NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7
7818                        | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY
7819                        | NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY
7820                        );
7821     }
7822 
7823     FOR_EACH_INDEX_IN_MASK(64, i, ret->linkMask)
7824     {
7825         link = nvswitch_get_link(device, i);
7826         if (link == NULL)
7827         {
7828             continue;
7829         }
7830 
7831         if (NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLTLC, link->linkNumber))
7832         {
7833             nvswitch_ctrl_clear_throughput_counters_lr10(device, link, counterMask);
7834         }
7835         if (NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber))
7836         {
7837             status = nvswitch_ctrl_clear_dl_error_counters_lr10(device, link, counterMask);
7838             // Return early with failure on clearing through minion
7839             if (status != NVL_SUCCESS)
7840             {
7841                 NVSWITCH_PRINT(device, ERROR,
7842                     "%s: Failure on clearing link counter mask 0x%x on link %d\n",
7843                     __FUNCTION__, counterMask, link->linkNumber);
7844                 break;
7845             }
7846         }
7847     }
7848     FOR_EACH_INDEX_IN_MASK_END;
7849 
7850     return status;
7851 }
7852 
NvlStatus
nvswitch_ctrl_set_nvlink_error_threshold_lr10
(
    nvswitch_device *device,
    NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS *ret
)
{
    // Configurable NVLink error thresholds are not supported on LR10.
    return -NVL_ERR_NOT_SUPPORTED;
}
7862 
static NvlStatus
nvswitch_ctrl_get_nvlink_error_threshold_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS *ret
)
{
    // Configurable NVLink error thresholds are not supported on LR10.
    return -NVL_ERR_NOT_SUPPORTED;
}
7872 
NvlStatus
nvswitch_get_board_id_lr10
(
    nvswitch_device *device,
    NvU16 *pBoardId
)
{
    // Board ID query is not supported on LR10; *pBoardId is untouched.
    return -NVL_ERR_NOT_SUPPORTED;
}
7882 
NvlStatus
nvswitch_ctrl_get_soe_heartbeat_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_SOE_HEARTBEAT_PARAMS *p
)
{
    // SOE heartbeat reporting is not supported on LR10.
    return -NVL_ERR_NOT_SUPPORTED;
}
7892 
static NvlStatus
nvswitch_cci_reset_and_drain_links_lr10
(
    nvswitch_device *device,
    NvU64 link_mask,
    NvBool bForced
)
{
    // CCI-driven reset-and-drain is not supported on LR10.
    return -NVL_ERR_NOT_SUPPORTED;
}
7903 
void
nvswitch_update_link_state_led_lr10
(
    nvswitch_device *device
)
{
    // No link-state LEDs to drive on LR10.
    return;
}
7912 
void
nvswitch_led_shutdown_lr10
(
    nvswitch_device *device
)
{
    // No LEDs to quiesce on LR10.
    return;
}
7921 
NvlStatus
nvswitch_check_io_sanity_lr10
(
    nvswitch_device *device
)
{
    // No additional I/O sanity checks are required on LR10.
    return NVL_SUCCESS;
}
7930 
void
nvswitch_fsp_update_cmdq_head_tail_lr10
(
    nvswitch_device *device,
    NvU32 queueHead,
    NvU32 queueTail
)
{
    // FSP is not present on LR10; no command queue to update.
    return; // -NVL_ERR_NOT_SUPPORTED;
}
7941 
7942 void
nvswitch_fsp_get_cmdq_head_tail_lr10(nvswitch_device * device,NvU32 * pQueueHead,NvU32 * pQueueTail)7943 nvswitch_fsp_get_cmdq_head_tail_lr10
7944 (
7945     nvswitch_device *device,
7946     NvU32 *pQueueHead,
7947     NvU32 *pQueueTail
7948 )
7949 {
7950     return; // -NVL_ERR_NOT_SUPPORTED;
7951 }
7952 
7953 void
nvswitch_fsp_update_msgq_head_tail_lr10(nvswitch_device * device,NvU32 msgqHead,NvU32 msgqTail)7954 nvswitch_fsp_update_msgq_head_tail_lr10
7955 (
7956     nvswitch_device *device,
7957     NvU32 msgqHead,
7958     NvU32 msgqTail
7959 )
7960 {
7961     return; // -NVL_ERR_NOT_SUPPORTED;
7962 }
7963 
7964 void
nvswitch_fsp_get_msgq_head_tail_lr10(nvswitch_device * device,NvU32 * pMsgqHead,NvU32 * pMsgqTail)7965 nvswitch_fsp_get_msgq_head_tail_lr10
7966 (
7967     nvswitch_device *device,
7968     NvU32 *pMsgqHead,
7969     NvU32 *pMsgqTail
7970 )
7971 {
7972    return; // -NVL_ERR_NOT_SUPPORTED;
7973 }
7974 
//
// Stub: the FSP EMEM channel does not exist on LR10.
//
// @returns 0 (no channel); the interface returns an unsigned size, so
//          -NVL_ERR_NOT_SUPPORTED cannot be used here.
//
NvU32
nvswitch_fsp_get_channel_size_lr10
(
    nvswitch_device *device
)
{
    return 0; // No FSP channel on LR10 (not supported).
}
7983 
//
// Stub: NVDM-type to SEID mapping is not supported on LR10.
//
// NOTE(review): the return type is NvU8, so the negative error code is
// implicitly converted to an unsigned value (wraps modulo 256) — callers
// cannot test this result against -NVL_ERR_NOT_SUPPORTED directly; confirm
// callers treat any LR10 result as invalid.
//
NvU8
nvswitch_fsp_nvdm_to_seid_lr10
(
    nvswitch_device *device,
    NvU8 nvdmType
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
7993 
//
// Stub: MCTP header construction for FSP is not supported on LR10.
//
// NOTE(review): the return type is NvU32, so the negative error code is
// implicitly converted to a large unsigned value; confirm callers treat any
// LR10 result as an invalid header rather than comparing against the signed
// error code.
//
NvU32
nvswitch_fsp_create_mctp_header_lr10
(
    nvswitch_device *device,
    NvU8 som,
    NvU8 eom,
    NvU8 seid,
    NvU8 seq
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
8006 
//
// Stub: NVDM header construction for FSP is not supported on LR10.
//
// @returns 0; the interface returns an unsigned header word, so
//          -NVL_ERR_NOT_SUPPORTED cannot be used here.
//
NvU32
nvswitch_fsp_create_nvdm_header_lr10
(
    nvswitch_device *device,
    NvU32 nvdmType
)
{
    return 0; // No FSP on LR10 (not supported).
}
8016 
//
// Stub: parsing FSP packet state/tag from a message buffer is not supported
// on LR10. @p pPacketState and @p pTag are not written; always returns
// -NVL_ERR_NOT_SUPPORTED.
//
NvlStatus
nvswitch_fsp_get_packet_info_lr10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size,
    NvU8 *pPacketState,
    NvU8 *pTag
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
8029 
//
// Stub: MCTP payload-header validation is not supported on LR10.
// Always fails with -NVL_ERR_NOT_SUPPORTED.
//
NvlStatus
nvswitch_fsp_validate_mctp_payload_header_lr10
(
    nvswitch_device  *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
8040 
//
// Stub: processing inbound FSP NVDM messages is not supported on LR10.
// Always fails with -NVL_ERR_NOT_SUPPORTED.
//
NvlStatus
nvswitch_fsp_process_nvdm_msg_lr10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
8051 
//
// Stub: processing FSP command responses is not supported on LR10.
// Always fails with -NVL_ERR_NOT_SUPPORTED.
//
NvlStatus
nvswitch_fsp_process_cmd_response_lr10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
8062 
//
// Stub: configuring the FSP EMEM controller (offset, auto-increment on
// write/read) is not supported on LR10. Always fails with
// -NVL_ERR_NOT_SUPPORTED.
//
NvlStatus
nvswitch_fsp_config_ememc_lr10
(
    nvswitch_device *device,
    NvU32 offset,
    NvBool bAincw,
    NvBool bAincr
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
8074 
//
// Stub: writing to FSP EMEM is not supported on LR10.
// Always fails with -NVL_ERR_NOT_SUPPORTED.
//
NvlStatus
nvswitch_fsp_write_to_emem_lr10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
8085 
//
// Stub: reading from FSP EMEM is not supported on LR10.
// @p pBuffer is not written; always returns -NVL_ERR_NOT_SUPPORTED.
//
NvlStatus
nvswitch_fsp_read_from_emem_lr10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
8096 
//
// Stub: mapping FSP error codes to NvlStatus is not supported on LR10.
// @p errorCode is ignored; always returns -NVL_ERR_NOT_SUPPORTED.
//
NvlStatus
nvswitch_fsp_error_code_to_nvlstatus_map_lr10
(
    nvswitch_device *device,
    NvU32 errorCode
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
8106 
//
// Stub: the FSP RPC capabilities query is not supported on LR10.
// @p params is left untouched; always returns -NVL_ERR_NOT_SUPPORTED.
//
NvlStatus
nvswitch_fsprpc_get_caps_lr10
(
    nvswitch_device *device,
    NVSWITCH_FSPRPC_GET_CAPS_PARAMS *params
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
8116 
//
// Stub: TNVL (Trusted NVLink) mode detection is not supported on LR10.
// Always fails with -NVL_ERR_NOT_SUPPORTED.
//
NvlStatus
nvswitch_detect_tnvl_mode_lr10
(
    nvswitch_device *device
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
8125 
//
// TNVL mode is never enabled on LR10 (feature not supported).
//
// @returns NV_FALSE always.
//
NvBool
nvswitch_is_tnvl_mode_enabled_lr10
(
    nvswitch_device *device
)
{
    return NV_FALSE;
}
8134 
//
// TNVL mode is never locked on LR10 (feature not supported).
//
// @returns NV_FALSE always.
//
NvBool
nvswitch_is_tnvl_mode_locked_lr10
(
    nvswitch_device *device
)
{
    return NV_FALSE;
}
8143 
//
// Stub: retrieving the attestation certificate chain requires TNVL support,
// which LR10 does not have. @p params is left untouched.
//
NvlStatus
nvswitch_tnvl_get_attestation_certificate_chain_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN_PARAMS *params
)
{
    // Not supported in LR10
    return -NVL_ERR_NOT_SUPPORTED;
}
8154 
//
// Stub: retrieving an attestation report requires TNVL support, which LR10
// does not have. @p params is left untouched.
//
NvlStatus
nvswitch_tnvl_get_attestation_report_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_ATTESTATION_REPORT_PARAMS *params
)
{
    // Not supported in LR10
    return -NVL_ERR_NOT_SUPPORTED;
}
8165 
//
// Stub: sending the FSP lock-config command requires TNVL and FSP support,
// neither of which LR10 has.
//
NvlStatus
nvswitch_tnvl_send_fsp_lock_config_lr10
(
    nvswitch_device *device
)
{
    // Not supported in LR10
    return -NVL_ERR_NOT_SUPPORTED;
}
8175 
//
// Stub: the TNVL status query is not supported on LR10.
// @p params is left untouched; always returns -NVL_ERR_NOT_SUPPORTED.
//
NvlStatus
nvswitch_tnvl_get_status_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_TNVL_STATUS_PARAMS *params
)
{
    // Not supported in LR10
    return -NVL_ERR_NOT_SUPPORTED;
}
8186 
8187 //
8188 // This function auto creates the lr10 HAL connectivity from the NVSWITCH_INIT_HAL
8189 // macro in haldef_nvswitch.h
8190 //
8191 // Note: All hal fns must be implemented for each chip.
8192 //       There is no automatic stubbing here.
8193 //
//
// Wire up the LR10 HAL function pointers on @p device.
//
// Records the chip architecture/implementation IDs, then lets the
// NVSWITCH_INIT_HAL / NVSWITCH_INIT_HAL_LS10 macros (haldef_nvswitch.h)
// bind every HAL entry point to its *_lr10 implementation.
//
void nvswitch_setup_hal_lr10(nvswitch_device *device)
{
    device->chip_arch = NVSWITCH_GET_INFO_INDEX_ARCH_LR10;
    device->chip_impl = NVSWITCH_GET_INFO_INDEX_IMPL_LR10;

    NVSWITCH_INIT_HAL(device, lr10);
    NVSWITCH_INIT_HAL_LS10(device, lr10);
}
8205