1 /* 2 * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 * SPDX-License-Identifier: MIT 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 */ 23 24 #include "nvlink_export.h" 25 #include "common_nvswitch.h" 26 #include "error_nvswitch.h" 27 #include "regkey_nvswitch.h" 28 #include "haldef_nvswitch.h" 29 #include "nvlink_inband_msg.h" 30 #include "rmsoecmdif.h" 31 32 #include "ls10/ls10.h" 33 #include "lr10/lr10.h" 34 #include "ls10/clock_ls10.h" 35 #include "ls10/inforom_ls10.h" 36 #include "ls10/minion_ls10.h" 37 #include "ls10/pmgr_ls10.h" 38 #include "ls10/therm_ls10.h" 39 #include "ls10/smbpbi_ls10.h" 40 #include "ls10/cci_ls10.h" 41 #include "cci/cci_nvswitch.h" 42 #include "ls10/multicast_ls10.h" 43 #include "ls10/soe_ls10.h" 44 #include "ls10/gfw_ls10.h" 45 46 #include "nvswitch/ls10/dev_nvs_top.h" 47 #include "nvswitch/ls10/ptop_discovery_ip.h" 48 #include "nvswitch/ls10/dev_pri_masterstation_ip.h" 49 #include "nvswitch/ls10/dev_pri_hub_sys_ip.h" 50 #include "nvswitch/ls10/dev_nvlw_ip.h" 51 #include "nvswitch/ls10/dev_nvlsaw_ip.h" 52 #include "nvswitch/ls10/dev_nvlsaw_ip_addendum.h" 53 #include "nvswitch/ls10/dev_nvltlc_ip.h" 54 #include "nvswitch/ls10/dev_nvldl_ip.h" 55 #include "nvswitch/ls10/dev_nport_ip.h" 56 #include "nvswitch/ls10/dev_route_ip.h" 57 #include "nvswitch/ls10/dev_nport_ip_addendum.h" 58 #include "nvswitch/ls10/dev_route_ip_addendum.h" 59 #include "nvswitch/ls10/dev_ingress_ip.h" 60 #include "nvswitch/ls10/dev_egress_ip.h" 61 #include "nvswitch/ls10/dev_tstate_ip.h" 62 #include "nvswitch/ls10/dev_sourcetrack_ip.h" 63 #include "nvswitch/ls10/dev_cpr_ip.h" 64 #include "nvswitch/ls10/dev_nvlipt_lnk_ip.h" 65 #include "nvswitch/ls10/dev_minion_ip.h" 66 #include "nvswitch/ls10/dev_minion_ip_addendum.h" 67 #include "nvswitch/ls10/dev_multicasttstate_ip.h" 68 #include "nvswitch/ls10/dev_reductiontstate_ip.h" 69 #include "ls10/minion_nvlink_defines_public_ls10.h" 70 #include "nvswitch/ls10/dev_pmgr.h" 71 #include "nvswitch/ls10/dev_timer_ip.h" 72 73 #define NVSWITCH_IFR_MIN_BIOS_VER_LS10 0x9610170000ull 74 #define NVSWITCH_SMBPBI_MIN_BIOS_VER_LS10 0x9610220000ull 75 76 void * 77 
nvswitch_alloc_chipdevice_ls10 78 ( 79 nvswitch_device *device 80 ) 81 { 82 void *chip_device; 83 84 chip_device = nvswitch_os_malloc(sizeof(ls10_device)); 85 if (NULL != chip_device) 86 { 87 nvswitch_os_memset(chip_device, 0, sizeof(ls10_device)); 88 } 89 90 device->chip_id = NV_PMC_BOOT_42_CHIP_ID_LS10; 91 return(chip_device); 92 } 93 94 /* 95 * @Brief : Initializes the PRI Ring 96 * 97 * @Description : An example of a function that we'd like to generate from SU. 98 * 99 * @paramin device a reference to the device to initialize 100 * 101 * @returns NVL_SUCCESS if the action succeeded 102 */ 103 NvlStatus 104 nvswitch_pri_ring_init_ls10 105 ( 106 nvswitch_device *device 107 ) 108 { 109 NvU32 checked_data; 110 NvU32 command; 111 NvBool keepPolling; 112 NVSWITCH_TIMEOUT timeout; 113 114 if (!IS_FMODEL(device)) 115 { 116 // check if FSP successfully started 117 nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); 118 do 119 { 120 keepPolling = (nvswitch_timeout_check(&timeout)) ? 
NV_FALSE : NV_TRUE; 121 122 command = NVSWITCH_REG_RD32(device, _GFW_GLOBAL, _BOOT_PARTITION_PROGRESS); 123 if (FLD_TEST_DRF(_GFW_GLOBAL, _BOOT_PARTITION_PROGRESS, _VALUE, _SUCCESS, command)) 124 { 125 break; 126 } 127 128 nvswitch_os_sleep(1); 129 } 130 while (keepPolling); 131 if (!FLD_TEST_DRF(_GFW_GLOBAL, _BOOT_PARTITION_PROGRESS, _VALUE, _SUCCESS, command)) 132 { 133 NVSWITCH_RAW_ERROR_LOG_TYPE report = {0, { 0 }}; 134 NVSWITCH_RAW_ERROR_LOG_TYPE report_saw = {0, { 0 }}; 135 NvU32 report_idx = 0; 136 NvU32 i; 137 138 report.data[report_idx++] = command; 139 NVSWITCH_PRINT(device, ERROR, "%s: -- _GFW_GLOBAL, _BOOT_PARTITION_PROGRESS (0x%x) != _SUCCESS --\n", 140 __FUNCTION__, command); 141 142 for (i = 0; i <= 15; i++) 143 { 144 command = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW, _SW_SCRATCH(i)); 145 report_saw.data[i] = command; 146 NVSWITCH_PRINT(device, ERROR, "%s: -- NV_NVLSAW_SW_SCRATCH(%d) = 0x%08x\n", 147 __FUNCTION__, i, command); 148 } 149 150 for (i = 0; i < NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2__SIZE_1; i++) 151 { 152 command = NVSWITCH_REG_RD32(device, _PFSP, _FALCON_COMMON_SCRATCH_GROUP_2(i)); 153 report.data[report_idx++] = command; 154 NVSWITCH_PRINT(device, ERROR, "%s: -- NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(%d) = 0x%08x\n", 155 __FUNCTION__, i, command); 156 } 157 158 // Include useful scratch information for triage 159 NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_FIRMWARE_INITIALIZATION_FAILURE, 160 "Fatal, Firmware initialization failure (0x%x/0x%x, 0x%x, 0x%x, 0x%x/0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", 161 report.data[0], report.data[1], report.data[2], report.data[3], report.data[4], 162 report_saw.data[0], report_saw.data[1], report_saw.data[12], report_saw.data[14], report_saw.data[15]); 163 return -NVL_INITIALIZATION_TOTAL_FAILURE; 164 } 165 166 command = NVSWITCH_ENG_RD32(device, SYS_PRI_HUB, , 0, _PPRIV_SYS, _PRI_RING_INIT); 167 if (FLD_TEST_DRF(_PPRIV_SYS, _PRI_RING_INIT, _STATUS, _ALIVE, command)) 168 { 169 // _STATUS == ALIVE. 
Skipping 170 return NVL_SUCCESS; 171 } 172 173 if (!FLD_TEST_DRF(_PPRIV_SYS, _PRI_RING_INIT, _STATUS, _ALIVE_IN_SAFE_MODE, command)) 174 { 175 NVSWITCH_PRINT(device, ERROR, "%s: -- Initial _STATUS (0x%x) != _ALIVE_IN_SAFE_MODE --\n", 176 __FUNCTION__, DRF_VAL(_PPRIV_SYS, _PRI_RING_INIT, _STATUS, command)); 177 return -NVL_ERR_GENERIC; 178 } 179 180 // .Switch PRI Ring Init Sequence 181 182 // ***** 183 184 // . [SW] Enumerate and start the PRI Ring 185 186 NVSWITCH_ENG_WR32(device, SYS_PRI_HUB, , 0, _PPRIV_SYS, _PRI_RING_INIT, 187 DRF_DEF(_PPRIV_SYS, _PRI_RING_INIT, _CMD, _ENUMERATE_AND_START)); 188 189 // . [SW] Wait for the command to complete 190 191 if (IS_EMULATION(device)) 192 { 193 nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); 194 } 195 else 196 { 197 nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); 198 } 199 200 do 201 { 202 keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; 203 command = NVSWITCH_ENG_RD32(device, SYS_PRI_HUB, , 0, _PPRIV_SYS, _PRI_RING_INIT); 204 205 if ( FLD_TEST_DRF(_PPRIV_SYS,_PRI_RING_INIT,_CMD,_NONE,command) ) 206 { 207 break; 208 } 209 if ( keepPolling == NV_FALSE ) 210 { 211 NVSWITCH_PRINT(device, ERROR, "%s: -- Timeout waiting for _CMD == _NONE --\n", __FUNCTION__); 212 return -NVL_ERR_GENERIC; 213 } 214 } 215 while (keepPolling); 216 217 // . [SW] Confirm PRI Ring initialized properly. Executing four reads to introduce a delay. 218 219 if (IS_EMULATION(device)) 220 { 221 nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout); 222 } 223 else 224 { 225 nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); 226 } 227 228 do 229 { 230 keepPolling = (nvswitch_timeout_check(&timeout)) ? 
NV_FALSE : NV_TRUE; 231 command = NVSWITCH_ENG_RD32(device, SYS_PRI_HUB, , 0, _PPRIV_SYS, _PRI_RING_INIT); 232 233 if ( FLD_TEST_DRF(_PPRIV_SYS, _PRI_RING_INIT, _STATUS, _ALIVE, command) ) 234 { 235 break; 236 } 237 if ( keepPolling == NV_FALSE ) 238 { 239 NVSWITCH_PRINT(device, ERROR, "%s: -- Timeout waiting for _STATUS == _ALIVE --\n", __FUNCTION__); 240 return -NVL_ERR_GENERIC; 241 } 242 } 243 while (keepPolling); 244 245 // . [SW] PRI Ring Interrupt Status0 and Status1 should be clear unless there was an error. 246 247 checked_data = NVSWITCH_ENG_RD32(device, PRI_MASTER_RS, , 0, _PPRIV_MASTER, _RING_INTERRUPT_STATUS0); 248 if ( !FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, _DISCONNECT_FAULT, 0x0, checked_data) ) 249 { 250 NVSWITCH_PRINT(device, ERROR, "%s: _PPRIV_MASTER,_RING_INTERRUPT_STATUS0,_DISCONNECT_FAULT != 0x0\n", __FUNCTION__); 251 } 252 if ( !FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, _GBL_WRITE_ERROR_FBP, 0x0, checked_data) ) 253 { 254 NVSWITCH_PRINT(device, ERROR, "%s: _PPRIV_MASTER,_RING_INTERRUPT_STATUS0,_GBL_WRITE_ERROR_FBP != 0x0\n", __FUNCTION__); 255 } 256 if ( !FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, _GBL_WRITE_ERROR_SYS, 0x0, checked_data) ) 257 { 258 NVSWITCH_PRINT(device, ERROR, "%s: _PPRIV_MASTER,_RING_INTERRUPT_STATUS0,_GBL_WRITE_ERROR_SYS != 0x0\n", __FUNCTION__); 259 } 260 if ( !FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, _OVERFLOW_FAULT, 0x0, checked_data) ) 261 { 262 NVSWITCH_PRINT(device, ERROR, "%s: _PPRIV_MASTER,_RING_INTERRUPT_STATUS0,_OVERFLOW_FAULT != 0x0\n", __FUNCTION__); 263 } 264 if ( !FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, _RING_START_CONN_FAULT, 0x0, checked_data) ) 265 { 266 NVSWITCH_PRINT(device, ERROR, "%s: _PPRIV_MASTER,_RING_INTERRUPT_STATUS0,_RING_START_CONN_FAULT != 0x0\n", __FUNCTION__); 267 } 268 269 checked_data = NVSWITCH_ENG_RD32(device, PRI_MASTER_RS, , 0, _PPRIV_MASTER, _RING_INTERRUPT_STATUS1); 270 if ( 
!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS1, _GBL_WRITE_ERROR_GPC, 0x0, checked_data) ) 271 { 272 NVSWITCH_PRINT(device, ERROR, "%s: _PPRIV_MASTER,_RING_INTERRUPT_STATUS1,_GBL_WRITE_ERROR_GPC != 0x0\n", __FUNCTION__); 273 } 274 275 // ***** 276 } 277 278 return NVL_SUCCESS; 279 } 280 281 /* 282 * @Brief : Destroys an NvSwitch hardware state 283 * 284 * @Description : 285 * 286 * @param[in] device a reference to the device to initialize 287 */ 288 void 289 nvswitch_destroy_device_state_ls10 290 ( 291 nvswitch_device *device 292 ) 293 { 294 ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device); 295 296 if (nvswitch_is_soe_supported(device)) 297 { 298 nvswitch_soe_unregister_events(device); 299 nvswitch_unload_soe_ls10(device); 300 } 301 302 if (chip_device != NULL) 303 { 304 if ((chip_device->latency_stats) != NULL) 305 { 306 nvswitch_os_free(chip_device->latency_stats); 307 } 308 309 if ((chip_device->ganged_link_table) != NULL) 310 { 311 nvswitch_os_free(chip_device->ganged_link_table); 312 } 313 314 nvswitch_free_chipdevice(device); 315 } 316 317 nvswitch_i2c_destroy(device); 318 319 return; 320 } 321 322 NvlStatus 323 nvswitch_initialize_pmgr_ls10 324 ( 325 nvswitch_device *device 326 ) 327 { 328 // Init PMGR info 329 nvswitch_init_pmgr_ls10(device); 330 nvswitch_init_pmgr_devices_ls10(device); 331 332 return NVL_SUCCESS; 333 } 334 335 336 NvlStatus 337 nvswitch_initialize_ip_wrappers_ls10 338 ( 339 nvswitch_device *device 340 ) 341 { 342 NvlStatus status = NVL_SUCCESS; 343 344 // 345 // Now that software knows the devices and addresses, it must take all 346 // the wrapper modules out of reset. 
347 // 348 349 status = nvswitch_nvs_top_prod_ls10(device); 350 if (status != NVL_SUCCESS) 351 { 352 NVSWITCH_PRINT(device, ERROR, 353 "%s: TOP PROD initialization failed.\n", 354 __FUNCTION__); 355 return status; 356 } 357 358 status = nvswitch_apply_prod_nvlw_ls10(device); 359 if (status != NVL_SUCCESS) 360 { 361 NVSWITCH_PRINT(device, ERROR, 362 "%s: NVLW PROD initialization failed.\n", 363 __FUNCTION__); 364 return status; 365 } 366 367 status = nvswitch_apply_prod_nxbar_ls10(device); 368 if (status != NVL_SUCCESS) 369 { 370 NVSWITCH_PRINT(device, ERROR, 371 "%s: NXBAR PROD initialization failed.\n", 372 __FUNCTION__); 373 return status; 374 } 375 376 return status; 377 } 378 379 void 380 nvswitch_set_ganged_link_table_ls10 381 ( 382 nvswitch_device *device, 383 NvU32 firstIndex, 384 NvU64 *ganged_link_table, 385 NvU32 numEntries 386 ) 387 { 388 NvU32 i; 389 390 NVSWITCH_NPORT_MC_BCAST_WR32_LS10(device, _ROUTE, _REG_TABLE_ADDRESS, 391 DRF_NUM(_ROUTE, _REG_TABLE_ADDRESS, _INDEX, firstIndex) | 392 DRF_NUM(_ROUTE, _REG_TABLE_ADDRESS, _AUTO_INCR, 1)); 393 394 for (i = 0; i < numEntries; i++) 395 { 396 NVSWITCH_NPORT_MC_BCAST_WR32_LS10(device, _ROUTE, _REG_TABLE_DATA1, 397 NvU64_HI32(ganged_link_table[i])); 398 399 // HW will fill in the ECC 400 NVSWITCH_NPORT_MC_BCAST_WR32_LS10(device, _ROUTE, _REG_TABLE_DATA2, 401 0); 402 403 // 404 // Writing DATA0 triggers the latched data to be written to the table 405 // So write it last 406 // 407 NVSWITCH_NPORT_MC_BCAST_WR32_LS10(device, _ROUTE, _REG_TABLE_DATA0, 408 NvU64_LO32(ganged_link_table[i])); 409 } 410 } 411 412 static NvlStatus 413 _nvswitch_init_ganged_link_routing_ls10 414 ( 415 nvswitch_device *device 416 ) 417 { 418 ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device); 419 NvU32 gang_size; 420 NvU64 gang_entry; 421 NvU32 glt_entries = 16; 422 NvU32 glt_size = (NV_ROUTE_REG_TABLE_ADDRESS_INDEX_GLTAB_DEPTH + 1); 423 NvU64 *ganged_link_table = NULL; 424 NvU32 i; 425 NvU32 glt_index; 426 427 // 428 // 
The ganged link routing table is composed of 256 entries of 64-bits in 429 // size. Each entry is divided into 16 4-bit fields GLX(i), where GLX(x) 430 // contains the distribution pattern for x ports. Zero ports is not a 431 // valid configuration, so GLX(0) corresponds with 16 ports. 432 // Each GLX(i) column therefore should contain a uniform distribution 433 // pattern for i ports. 434 // 435 // The ganged link routing table will be loaded with following values: 436 // (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), 437 // (1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1), 438 // (2,0,0,2,2,2,2,2,2,2,2,2,2,2,2,2), 439 // (3,0,1,0,3,3,3,3,3,3,3,3,3,3,3,3), 440 // : 441 // (E,0,0,2,2,4,2,2,6,2,4,1,2,7,2,E), 442 // (F,0,1,0,3,0,3,3,7,3,5,2,3,8,3,0) 443 // 444 // Refer table 22: Definition of size bits used with Ganged Link Number Table. 445 // 446 447 //Alloc memory for Ganged Link Table 448 ganged_link_table = nvswitch_os_malloc(glt_size * sizeof(gang_entry)); 449 if (ganged_link_table == NULL) 450 { 451 NVSWITCH_PRINT(device, ERROR, 452 "Failed to allocate memory for GLT!!\n"); 453 return -NVL_NO_MEM; 454 } 455 456 for (glt_index = 0; glt_index < glt_size; glt_index++) 457 { 458 gang_entry = 0; 459 for (i = 0; i < glt_entries; i++) 460 { 461 gang_size = ((i==0) ? 16 : i); 462 gang_entry |= 463 DRF_NUM64(_ROUTE, _REG_TABLE_DATA0, _GLX(i), glt_index % gang_size); 464 } 465 466 ganged_link_table[glt_index] = gang_entry; 467 } 468 469 nvswitch_set_ganged_link_table_ls10(device, 0, ganged_link_table, glt_size); 470 471 chip_device->ganged_link_table = ganged_link_table; 472 473 return NVL_SUCCESS; 474 } 475 476 static void 477 _nvswitch_init_cmd_routing_ls10 478 ( 479 nvswitch_device *device 480 ) 481 { 482 NvU32 val; 483 484 //Set Hash policy for the requests. 
485 val = DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN1, _SPRAY) | 486 DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN2, _SPRAY) | 487 DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN4, _SPRAY) | 488 DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN7, _SPRAY); 489 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _ROUTE, _CMD_ROUTE_TABLE0, val); 490 491 // Set Random policy for reponses. 492 val = DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE2, _RFUN16, _RANDOM) | 493 DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE2, _RFUN17, _RANDOM); 494 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _ROUTE, _CMD_ROUTE_TABLE2, val); 495 } 496 497 static NvlStatus 498 _nvswitch_init_portstat_counters_ls10 499 ( 500 nvswitch_device *device 501 ) 502 { 503 NvlStatus retval; 504 NvU32 idx_channel; 505 NVSWITCH_SET_LATENCY_BINS default_latency_bins; 506 ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device); 507 508 chip_device->latency_stats = nvswitch_os_malloc(sizeof(NVSWITCH_LATENCY_STATS_LS10)); 509 if (chip_device->latency_stats == NULL) 510 { 511 NVSWITCH_PRINT(device, ERROR, "%s: Failed allocate memory for latency stats\n", 512 __FUNCTION__); 513 return -NVL_NO_MEM; 514 } 515 516 nvswitch_os_memset(chip_device->latency_stats, 0, sizeof(NVSWITCH_LATENCY_STATS_LS10)); 517 518 // 519 // These bin thresholds are values provided by Arch based off 520 // switch latency expectations. 521 // 522 for (idx_channel=0; idx_channel < NVSWITCH_NUM_VCS_LS10; idx_channel++) 523 { 524 default_latency_bins.bin[idx_channel].lowThreshold = 120; // 120ns 525 default_latency_bins.bin[idx_channel].medThreshold = 200; // 200ns 526 default_latency_bins.bin[idx_channel].hiThreshold = 1000; // 1us 527 } 528 529 // 530 // 6 hour sample interval 531 // The 48-bit counters can theoretically rollover after ~12 hours of full 532 // throttle traffic. 
533 // 534 chip_device->latency_stats->sample_interval_msec = 6 * 60 * 60 * 1000; 535 536 retval = nvswitch_ctrl_set_latency_bins(device, &default_latency_bins); 537 if (retval != NVL_SUCCESS) 538 { 539 NVSWITCH_PRINT(device, ERROR, "%s: Failed to set latency bins\n", 540 __FUNCTION__); 541 NVSWITCH_ASSERT(0); 542 return retval; 543 } 544 545 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _NPORT, _PORTSTAT_CONTROL, 546 DRF_DEF(_NPORT, _PORTSTAT_CONTROL, _RANGESELECT, _BITS13TO0)); 547 548 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _NPORT, _PORTSTAT_SOURCE_FILTER_0, 549 DRF_NUM(_NPORT, _PORTSTAT_SOURCE_FILTER_0, _SRCFILTERBIT, 0xFFFFFFFF)); 550 551 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _NPORT, _PORTSTAT_SOURCE_FILTER_1, 552 DRF_NUM(_NPORT, _PORTSTAT_SOURCE_FILTER_1, _SRCFILTERBIT, 0xFFFFFFFF)); 553 554 NVSWITCH_SAW_WR32_LS10(device, _NVLSAW, _GLBLLATENCYTIMERCTRL, 555 DRF_DEF(_NVLSAW, _GLBLLATENCYTIMERCTRL, _ENABLE, _ENABLE)); 556 557 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _NPORT, _PORTSTAT_SNAP_CONTROL, 558 DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) | 559 DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE)); 560 561 // Start & Clear Residency Counters 562 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL, 563 DRF_DEF(_MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL, _ENABLE_TIMER, _ENABLE) | 564 DRF_DEF(_MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL, _SNAP_ON_DEMAND, _ENABLE)); 565 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL, 566 DRF_DEF(_MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL, _ENABLE_TIMER, _ENABLE) | 567 DRF_DEF(_MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL, _SNAP_ON_DEMAND, _DISABLE)); 568 569 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL, 570 DRF_DEF(_REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL, _ENABLE_TIMER, _ENABLE) | 571 DRF_DEF(_REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL, _SNAP_ON_DEMAND, _ENABLE)); 572 NVSWITCH_NPORT_BCAST_WR32_LS10(device, 
_REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL, 573 DRF_DEF(_REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL, _ENABLE_TIMER, _ENABLE) | 574 DRF_DEF(_REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL, _SNAP_ON_DEMAND, _DISABLE)); 575 576 // Start & Clear Stall/Busy Counters 577 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL, 578 DRF_DEF(_MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL, _ENABLE_TIMER, _ENABLE) | 579 DRF_DEF(_MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL, _SNAP_ON_DEMAND, _ENABLE)); 580 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL, 581 DRF_DEF(_MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL, _ENABLE_TIMER, _ENABLE) | 582 DRF_DEF(_MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL, _SNAP_ON_DEMAND, _DISABLE)); 583 584 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL, 585 DRF_DEF(_REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL, _ENABLE_TIMER, _ENABLE) | 586 DRF_DEF(_REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL, _SNAP_ON_DEMAND, _ENABLE)); 587 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL, 588 DRF_DEF(_REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL, _ENABLE_TIMER, _ENABLE) | 589 DRF_DEF(_REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL, _SNAP_ON_DEMAND, _DISABLE)); 590 591 return NVL_SUCCESS; 592 } 593 594 NvlStatus 595 nvswitch_initialize_route_ls10 596 ( 597 nvswitch_device *device 598 ) 599 { 600 NvlStatus retval; 601 602 retval = _nvswitch_init_ganged_link_routing_ls10(device); 603 if (NVL_SUCCESS != retval) 604 { 605 NVSWITCH_PRINT(device, ERROR, 606 "%s: Failed to initialize GLT\n", 607 __FUNCTION__); 608 goto nvswitch_initialize_route_exit; 609 } 610 611 _nvswitch_init_cmd_routing_ls10(device); 612 613 // Initialize Portstat Counters 614 retval = _nvswitch_init_portstat_counters_ls10(device); 615 if (NVL_SUCCESS != retval) 616 { 617 NVSWITCH_PRINT(device, ERROR, 618 "%s: Failed to initialize portstat counters\n", 619 __FUNCTION__); 620 goto nvswitch_initialize_route_exit; 
621 } 622 623 // TODO: Setup multicast/reductions 624 625 nvswitch_initialize_route_exit: 626 return retval; 627 } 628 629 NvlStatus 630 nvswitch_ctrl_get_counters_ls10 631 ( 632 nvswitch_device *device, 633 NVSWITCH_NVLINK_GET_COUNTERS_PARAMS *ret 634 ) 635 { 636 nvlink_link *link; 637 NvU8 i; 638 NvU32 counterMask; 639 NvU32 data; 640 NvU32 val; 641 NvU64 tx0TlCount; 642 NvU64 tx1TlCount; 643 NvU64 rx0TlCount; 644 NvU64 rx1TlCount; 645 NvU32 laneId; 646 NvBool bLaneReversed; 647 NvlStatus status; 648 NvBool minion_enabled; 649 650 ct_assert(NVSWITCH_NUM_LANES_LS10 <= NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE__SIZE); 651 652 link = nvswitch_get_link(device, ret->linkId); 653 if ((link == NULL) || 654 !NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLDL, link->linkNumber)) 655 { 656 return -NVL_BAD_ARGS; 657 } 658 659 minion_enabled = nvswitch_is_minion_initialized(device, NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION)); 660 661 counterMask = ret->counterMask; 662 663 // Common usage allows one of these to stand for all of them 664 if (counterMask & (NVSWITCH_NVLINK_COUNTER_TL_TX0 | 665 NVSWITCH_NVLINK_COUNTER_TL_TX1 | 666 NVSWITCH_NVLINK_COUNTER_TL_RX0 | 667 NVSWITCH_NVLINK_COUNTER_TL_RX1)) 668 { 669 tx0TlCount = nvswitch_read_64bit_counter(device, 670 NVSWITCH_LINK_OFFSET_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(0)), 671 NVSWITCH_LINK_OFFSET_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(0))); 672 if (NVBIT64(63) & tx0TlCount) 673 { 674 ret->bTx0TlCounterOverflow = NV_TRUE; 675 tx0TlCount &= ~(NVBIT64(63)); 676 } 677 678 tx1TlCount = nvswitch_read_64bit_counter(device, 679 NVSWITCH_LINK_OFFSET_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(1)), 680 NVSWITCH_LINK_OFFSET_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(1))); 681 if (NVBIT64(63) & tx1TlCount) 682 { 683 ret->bTx1TlCounterOverflow = NV_TRUE; 684 tx1TlCount &= ~(NVBIT64(63)); 685 } 686 
687 rx0TlCount = nvswitch_read_64bit_counter(device, 688 NVSWITCH_LINK_OFFSET_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(0)), 689 NVSWITCH_LINK_OFFSET_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(0))); 690 if (NVBIT64(63) & rx0TlCount) 691 { 692 ret->bRx0TlCounterOverflow = NV_TRUE; 693 rx0TlCount &= ~(NVBIT64(63)); 694 } 695 696 rx1TlCount = nvswitch_read_64bit_counter(device, 697 NVSWITCH_LINK_OFFSET_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(1)), 698 NVSWITCH_LINK_OFFSET_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(1))); 699 if (NVBIT64(63) & rx1TlCount) 700 { 701 ret->bRx1TlCounterOverflow = NV_TRUE; 702 rx1TlCount &= ~(NVBIT64(63)); 703 } 704 705 ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_TX0)] = tx0TlCount; 706 ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_TX1)] = tx1TlCount; 707 ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_RX0)] = rx0TlCount; 708 ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_RX1)] = rx1TlCount; 709 } 710 711 if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT) 712 { 713 if (minion_enabled) 714 { 715 status = nvswitch_minion_get_dl_status(device, link->linkNumber, 716 NV_NVLSTAT_RX01, 0, &data); 717 if (status != NVL_SUCCESS) 718 { 719 return status; 720 } 721 data = DRF_VAL(_NVLSTAT, _RX01, _FLIT_CRC_ERRORS_VALUE, data); 722 } 723 else 724 { 725 // MINION disabled 726 data = 0; 727 } 728 729 ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT)] 730 = data; 731 } 732 733 if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_MASKED) 734 { 735 if (minion_enabled) 736 { 737 status = nvswitch_minion_get_dl_status(device, link->linkNumber, 738 NV_NVLSTAT_RX02, 0, &data); 739 if (status != NVL_SUCCESS) 740 { 741 return status; 742 } 743 data = DRF_VAL(_NVLSTAT, _RX02, _MASKED_CRC_ERRORS_VALUE, data); 744 } 745 else 746 { 747 // MINION disabled 748 
data = 0; 749 } 750 751 ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_MASKED)] 752 = data; 753 } 754 data = 0x0; 755 bLaneReversed = nvswitch_link_lane_reversed_ls10(device, link->linkNumber); 756 757 for (laneId = 0; laneId < NVSWITCH_NUM_LANES_LS10; laneId++) 758 { 759 // 760 // HW may reverse the lane ordering or it may be overridden by SW. 761 // If so, invert the interpretation of the lane CRC errors. 762 // 763 i = (NvU8)((bLaneReversed) ? (NVSWITCH_NUM_LANES_LS10 - 1) - laneId : laneId); 764 765 if (minion_enabled) 766 { 767 status = nvswitch_minion_get_dl_status(device, link->linkNumber, 768 NV_NVLSTAT_DB01, 0, &data); 769 if (status != NVL_SUCCESS) 770 { 771 return status; 772 } 773 } 774 else 775 { 776 // MINION disabled 777 data = 0; 778 } 779 780 if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(laneId)) 781 { 782 val = BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(laneId)); 783 784 switch (i) 785 { 786 case 0: 787 ret->nvlinkCounters[val] 788 = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L0, data); 789 break; 790 case 1: 791 ret->nvlinkCounters[val] 792 = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L1, data); 793 break; 794 } 795 } 796 } 797 798 if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY) 799 { 800 if (minion_enabled) 801 { 802 status = nvswitch_minion_get_dl_status(device, link->linkNumber, 803 NV_NVLSTAT_TX09, 0, &data); 804 if (status != NVL_SUCCESS) 805 { 806 return status; 807 } 808 data = DRF_VAL(_NVLSTAT, _TX09, _REPLAY_EVENTS_VALUE, data); 809 } 810 else 811 { 812 // MINION disabled 813 data = 0; 814 } 815 816 ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY)] 817 = data; 818 } 819 820 if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY) 821 { 822 if (minion_enabled) 823 { 824 status = nvswitch_minion_get_dl_status(device, link->linkNumber, 825 NV_NVLSTAT_LNK1, 0, &data); 826 if (status != NVL_SUCCESS) 827 { 828 return status; 829 } 830 
data = DRF_VAL(_NVLSTAT, _LNK1, _ERROR_COUNT1_RECOVERY_EVENTS_VALUE, data); 831 } 832 else 833 { 834 // MINION disabled 835 data = 0; 836 } 837 838 ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)] 839 = data; 840 } 841 842 if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_REPLAY) 843 { 844 if (minion_enabled) 845 { 846 status = nvswitch_minion_get_dl_status(device, link->linkNumber, 847 NV_NVLSTAT_RX00, 0, &data); 848 if (status != NVL_SUCCESS) 849 { 850 return status; 851 } 852 data = DRF_VAL(_NVLSTAT, _RX00, _REPLAY_EVENTS_VALUE, data); 853 } 854 else 855 { 856 // MINION disabled 857 data = 0; 858 } 859 860 ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_REPLAY)] 861 = data; 862 } 863 864 if ((counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS) || 865 (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL)) 866 { 867 if (minion_enabled) 868 { 869 status = nvswitch_minion_get_dl_status(device, link->linkNumber, 870 NV_NVLSTAT_DB11, 0, &data); 871 if (status != NVL_SUCCESS) 872 { 873 return status; 874 } 875 } 876 else 877 { 878 // MINION disabled 879 data = 0; 880 } 881 882 if (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS) 883 { 884 ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS)] 885 = DRF_VAL(_NVLSTAT_DB11, _COUNT_PHY_REFRESH, _PASS, data); 886 } 887 888 if (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL) 889 { 890 ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL)] 891 = DRF_VAL(_NVLSTAT_DB11, _COUNT_PHY_REFRESH, _FAIL, data); 892 } 893 } 894 895 return NVL_SUCCESS; 896 } 897 898 NvlStatus 899 nvswitch_ctrl_get_sw_info_ls10 900 ( 901 nvswitch_device *device, 902 NVSWITCH_GET_SW_INFO_PARAMS *p 903 ) 904 { 905 NvlStatus retval = NVL_SUCCESS; 906 NvU32 i; 907 908 if (p->count > NVSWITCH_GET_SW_INFO_COUNT_MAX) 909 { 910 NVSWITCH_PRINT(device, ERROR, 911 "%s: Invalid args\n", 912 __FUNCTION__); 913 return -NVL_BAD_ARGS; 914 } 915 916 
nvswitch_os_memset(p->info, 0, sizeof(NvU32)*NVSWITCH_GET_SW_INFO_COUNT_MAX); 917 918 for (i = 0; i < p->count; i++) 919 { 920 switch (p->index[i]) 921 { 922 case NVSWITCH_GET_SW_INFO_INDEX_INFOROM_NVL_SUPPORTED: 923 p->info[i] = NV_TRUE; 924 break; 925 case NVSWITCH_GET_SW_INFO_INDEX_INFOROM_BBX_SUPPORTED: 926 p->info[i] = NV_TRUE; 927 break; 928 default: 929 NVSWITCH_PRINT(device, ERROR, 930 "%s: Undefined NVSWITCH_GET_SW_INFO_INDEX 0x%x\n", 931 __FUNCTION__, 932 p->index[i]); 933 retval = -NVL_BAD_ARGS; 934 break; 935 } 936 } 937 938 return retval; 939 } 940 941 static void 942 nvswitch_ctrl_clear_throughput_counters_ls10 943 ( 944 nvswitch_device *device, 945 nvlink_link *link, 946 NvU32 counterMask 947 ) 948 { 949 NvU32 data; 950 951 if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLTLC, link->linkNumber)) 952 { 953 return; 954 } 955 956 // 957 // Common usage allows one of these to stand for all of them 958 // If one field is defined: perform a clear on counters 0 & 1 959 // 960 961 if ((counterMask) & ( NVSWITCH_NVLINK_COUNTER_TL_TX0 | 962 NVSWITCH_NVLINK_COUNTER_TL_TX1 | 963 NVSWITCH_NVLINK_COUNTER_TL_RX0 | 964 NVSWITCH_NVLINK_COUNTER_TL_RX1 )) 965 { 966 // TX 0 967 data = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0(0)); 968 data = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _RESET, 0x1, data); 969 NVSWITCH_LINK_WR32_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0(0), data); 970 971 // TX 1 972 data = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0(1)); 973 data = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _RESET, 0x1, data); 974 NVSWITCH_LINK_WR32_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0(1), data); 975 976 // RX 0 977 data = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0(0)); 978 data = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, 
_DEBUG_TP_CNTR_CTRL_0, _RESET, 0x1, data); 979 NVSWITCH_LINK_WR32_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0(0), data); 980 981 // RX 1 982 data = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0(1)); 983 data = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, _RESET, 0x1, data); 984 NVSWITCH_LINK_WR32_LS10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0(1), data); 985 } 986 } 987 988 static void 989 nvswitch_ctrl_clear_lp_counters_ls10 990 ( 991 nvswitch_device *device, 992 nvlink_link *link, 993 NvU32 counterMask 994 ) 995 { 996 NvlStatus status; 997 998 // Clears all LP counters 999 if (counterMask & NVSWITCH_NVLINK_LP_COUNTERS_DL) 1000 { 1001 status = nvswitch_minion_send_command(device, link->linkNumber, 1002 NV_MINION_NVLINK_DL_CMD_COMMAND_DLSTAT_CLR_DLLPCNT, 0); 1003 if (status != NVL_SUCCESS) 1004 { 1005 NVSWITCH_PRINT(device, ERROR, "%s : Failed to clear lp counts to MINION for link # %d\n", 1006 __FUNCTION__, link->linkNumber); 1007 } 1008 } 1009 } 1010 1011 static NvlStatus 1012 nvswitch_ctrl_clear_dl_error_counters_ls10 1013 ( 1014 nvswitch_device *device, 1015 nvlink_link *link, 1016 NvU32 counterMask 1017 ) 1018 { 1019 NvU32 data; 1020 1021 if ((!counterMask) || 1022 (!(counterMask & (NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0 | 1023 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1 | 1024 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2 | 1025 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3 | 1026 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4 | 1027 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5 | 1028 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6 | 1029 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7 | 1030 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS | 1031 NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY | 1032 NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)))) 1033 { 1034 NVSWITCH_PRINT(device, INFO, 1035 "%s: Link%d: No error count clear request, 
counterMask (0x%x). Returning!\n", 1036 __FUNCTION__, link->linkNumber, counterMask); 1037 return NVL_SUCCESS; 1038 } 1039 1040 // With Minion initialized, send command to minion 1041 if (nvswitch_is_minion_initialized(device, NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION))) 1042 { 1043 return nvswitch_minion_clear_dl_error_counters_ls10(device, link->linkNumber); 1044 } 1045 1046 // With Minion not-initialized, perform with the registers 1047 if (counterMask & (NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0 | 1048 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1 | 1049 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2 | 1050 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3 | 1051 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4 | 1052 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5 | 1053 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6 | 1054 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7 | 1055 NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY | 1056 NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY | 1057 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT | 1058 NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_MASKED )) 1059 { 1060 data = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL); 1061 data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_LANE_CRC, _CLEAR, data); 1062 data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_FLIT_CRC, _CLEAR, data); 1063 data = FLD_SET_DRF(_NVLDL_TX, _ERROR_COUNT_CTRL, _CLEAR_REPLAY, _CLEAR, data); 1064 data = FLD_SET_DRF(_NVLDL_TOP, _ERROR_COUNT_CTRL, _CLEAR_RECOVERY, _CLEAR, data); 1065 data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_RATES, _CLEAR, data); 1066 NVSWITCH_LINK_WR32_LS10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL, data); 1067 } 1068 1069 if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS) 1070 { 1071 data = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLDL, _NVLDL_RX, _ERROR_COUNT_CTRL); 1072 data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_LANE_CRC, _CLEAR, 
data); 1073 data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_RATES, _CLEAR, data); 1074 data = FLD_SET_DRF(_NVLDL_RX, _ERROR_COUNT_CTRL, _CLEAR_ECC_COUNTS, _CLEAR, data); 1075 } 1076 1077 return NVL_SUCCESS; 1078 } 1079 1080 static void 1081 _nvswitch_portstat_reset_latency_counters_ls10 1082 ( 1083 nvswitch_device *device 1084 ) 1085 { 1086 // Set SNAPONDEMAND from 0->1 to reset the counters 1087 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _NPORT, _PORTSTAT_SNAP_CONTROL, 1088 DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) | 1089 DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _ENABLE)); 1090 1091 // Set SNAPONDEMAND back to 0. 1092 NVSWITCH_NPORT_BCAST_WR32_LS10(device, _NPORT, _PORTSTAT_SNAP_CONTROL, 1093 DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) | 1094 DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE)); 1095 } 1096 1097 /* 1098 * Disable interrupts comming from NPG & NVLW blocks. 1099 */ 1100 void 1101 nvswitch_link_disable_interrupts_ls10 1102 ( 1103 nvswitch_device *device, 1104 NvU32 link 1105 ) 1106 { 1107 NvU32 localLinkIdx, instance; 1108 1109 instance = link / NVSWITCH_LINKS_PER_NVLIPT_LS10; 1110 localLinkIdx = link % NVSWITCH_LINKS_PER_NVLIPT_LS10; 1111 1112 if (nvswitch_is_soe_supported(device)) 1113 { 1114 nvswitch_soe_set_nport_interrupts_ls10(device, link, NV_FALSE); 1115 } 1116 else 1117 { 1118 NVSWITCH_NPORT_WR32_LS10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT, 1119 DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x0) | 1120 DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x0) | 1121 DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x0)); 1122 } 1123 1124 NVSWITCH_ENG_WR32(device, NVLW, , instance, _NVLW, _LINK_INTR_0_MASK(localLinkIdx), 1125 DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _FATAL, 0x0) | 1126 DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _NONFATAL, 0x0) | 1127 DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _CORRECTABLE, 0x0)); 1128 1129 NVSWITCH_ENG_WR32(device, NVLW, 
, instance, _NVLW, _LINK_INTR_1_MASK(localLinkIdx),
        DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _FATAL, 0x0) |
        DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _NONFATAL, 0x0) |
        DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _CORRECTABLE, 0x0));

    NVSWITCH_ENG_WR32(device, NVLW, , instance, _NVLW, _LINK_INTR_2_MASK(localLinkIdx),
        DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _FATAL, 0x0) |
        DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _NONFATAL, 0x0) |
        DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _CORRECTABLE, 0x0));
}

/*
 * Reset NPG & NVLW interrupt state.
 *
 * Re-enables the per-NPORT common error interrupts (via SOE when it is
 * supported, directly otherwise), restores the three per-link NVLW interrupt
 * masks, re-enables the NVLIPT_LNK link interrupt controls, and clears the
 * cached fatal-error flag for the link.
 */
static void
_nvswitch_link_reset_interrupts_ls10
(
    nvswitch_device *device,
    NvU32 link
)
{
    NvU32 regval;
    // NVLW engine instance and the link's index within that instance
    NvU32 eng_instance = link / NVSWITCH_LINKS_PER_NVLIPT_LS10;
    NvU32 localLinkNum = link % NVSWITCH_LINKS_PER_NVLIPT_LS10;

    if (nvswitch_is_soe_supported(device))
    {
        // SOE owns NPORT interrupt management when present.
        nvswitch_soe_set_nport_interrupts_ls10(device, link, NV_TRUE);
    }
    else
    {
        NVSWITCH_NPORT_WR32_LS10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT,
            DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x1) |
            DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x1) |
            DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x1));
    }

    //
    // NOTE(review): the writes below OR _NVLW_LINK _INTR_0_MASK field values
    // into the _LINK_INTR_0/1_MASK registers (e.g. _INTR_0_MASK fields in the
    // _LINK_INTR_1_MASK write) — presumably the INTR0/INTR1 field layout is
    // shared across these registers; confirm against the register manual.
    //
    NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_0_MASK(localLinkNum),
        DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _FATAL, 0x1) |
        DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _NONFATAL, 0x0) |
        DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _CORRECTABLE, 0x0) |
        DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR0, 0x1) |
        DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR1, 0x0));

    NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_1_MASK(localLinkNum),
        DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _FATAL, 0x0) |
        DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _NONFATAL, 0x1) |
        DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _CORRECTABLE, 0x1) |
        DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR0, 0x0) |
        DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR1, 0x1));

    NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_2_MASK(localLinkNum),
        DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _FATAL, 0x0) |
        DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _NONFATAL, 0x0) |
        DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _CORRECTABLE, 0x0) |
        DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR0, 0x0) |
        DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR1, 0x0));

    // NVLIPT_LNK: re-enable both link interrupt lines (read-modify-write)
    regval = NVSWITCH_LINK_RD32_LS10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _INTR_CONTROL_LINK);
    regval = regval |
             DRF_NUM(_NVLIPT_LNK, _INTR_CONTROL_LINK, _INT0_EN, 0x1) |
             DRF_NUM(_NVLIPT_LNK, _INTR_CONTROL_LINK, _INT1_EN, 0x1);
    NVSWITCH_LINK_WR32_LS10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _INTR_CONTROL_LINK, regval);

    // NVLIPT_LNK_INTR_1: enable link-state-request-ready on interrupt line 1
    regval = NVSWITCH_LINK_RD32_LS10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _INTR_INT1_EN);
    regval = regval | DRF_NUM(_NVLIPT_LNK, _INTR_INT1_EN, _LINKSTATEREQUESTREADYSET, 0x1);
    NVSWITCH_LINK_WR32_LS10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _INTR_INT1_EN, regval);

    // Clear fatal error status
    device->link[link].fatal_error_occurred = NV_FALSE;
}

//
// Data collector which runs on a background thread, collecting latency stats.
//
// The latency counters have a maximum window period of about 12 hours
// (2^48 clk cycles). The counters reset after this period. So SW snaps
// the bins and records latencies every 6 hours. Setting SNAPONDEMAND from 0->1
// snaps the latency counters and updates them to PRI registers for
// the SW to read. It then resets the counters to start collecting fresh latencies.
1211 // 1212 1213 void 1214 nvswitch_internal_latency_bin_log_ls10 1215 ( 1216 nvswitch_device *device 1217 ) 1218 { 1219 ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device); 1220 NvU32 idx_nport; 1221 NvU32 idx_vc; 1222 NvBool vc_valid; 1223 NvU64 lo, hi; 1224 NvU64 latency; 1225 NvU64 time_nsec; 1226 NvU32 link_type; // Access or trunk link 1227 NvU64 last_visited_time_nsec; 1228 1229 if (chip_device->latency_stats == NULL) 1230 { 1231 // Latency stat buffers not allocated yet 1232 return; 1233 } 1234 1235 time_nsec = nvswitch_os_get_platform_time(); 1236 last_visited_time_nsec = chip_device->latency_stats->last_visited_time_nsec; 1237 1238 // Update last visited time 1239 chip_device->latency_stats->last_visited_time_nsec = time_nsec; 1240 1241 // Compare time stamp and reset the counters if the snap is missed 1242 if (!IS_RTLSIM(device) || !IS_FMODEL(device)) 1243 { 1244 if ((last_visited_time_nsec != 0) && 1245 ((time_nsec - last_visited_time_nsec) > 1246 chip_device->latency_stats->sample_interval_msec * NVSWITCH_INTERVAL_1MSEC_IN_NS)) 1247 { 1248 NVSWITCH_PRINT(device, ERROR, 1249 "Latency metrics recording interval missed. 
Resetting counters.\n"); 1250 _nvswitch_portstat_reset_latency_counters_ls10(device); 1251 return; 1252 } 1253 } 1254 1255 for (idx_nport=0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++) 1256 { 1257 if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NPORT, idx_nport)) 1258 { 1259 continue; 1260 } 1261 1262 // Setting SNAPONDEMAND from 0->1 snaps the latencies and resets the counters 1263 NVSWITCH_LINK_WR32_LS10(device, idx_nport, NPORT, _NPORT, _PORTSTAT_SNAP_CONTROL, 1264 DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) | 1265 DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _ENABLE)); 1266 1267 link_type = NVSWITCH_LINK_RD32_LS10(device, idx_nport, NPORT, _NPORT, _CTRL); 1268 for (idx_vc = 0; idx_vc < NVSWITCH_NUM_VCS_LS10; idx_vc++) 1269 { 1270 vc_valid = NV_FALSE; 1271 1272 // VC's CREQ0(0) and RSP0(5) are relevant on access links. 1273 if (FLD_TEST_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _ACCESSLINK, link_type) && 1274 ((idx_vc == NV_NPORT_VC_MAPPING_CREQ0) || 1275 (idx_vc == NV_NPORT_VC_MAPPING_RSP0))) 1276 { 1277 vc_valid = NV_TRUE; 1278 } 1279 1280 // VC's CREQ0(0), RSP0(5), CREQ1(6) and RSP1(7) are relevant on trunk links. 
1281 if (FLD_TEST_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _TRUNKLINK, link_type) && 1282 ((idx_vc == NV_NPORT_VC_MAPPING_CREQ0) || 1283 (idx_vc == NV_NPORT_VC_MAPPING_RSP0) || 1284 (idx_vc == NV_NPORT_VC_MAPPING_CREQ1) || 1285 (idx_vc == NV_NPORT_VC_MAPPING_RSP1))) 1286 { 1287 vc_valid = NV_TRUE; 1288 } 1289 1290 // If the VC is not being used, skip reading it 1291 if (!vc_valid) 1292 { 1293 continue; 1294 } 1295 1296 lo = NVSWITCH_NPORT_PORTSTAT_RD32_LS10(device, idx_nport, _COUNT, _LOW, _0, idx_vc); 1297 hi = NVSWITCH_NPORT_PORTSTAT_RD32_LS10(device, idx_nport, _COUNT, _LOW, _1, idx_vc); 1298 latency = lo | (hi << 32); 1299 chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].low += latency; 1300 1301 lo = NVSWITCH_NPORT_PORTSTAT_RD32_LS10(device, idx_nport, _COUNT, _MEDIUM, _0, idx_vc); 1302 hi = NVSWITCH_NPORT_PORTSTAT_RD32_LS10(device, idx_nport, _COUNT, _MEDIUM, _1, idx_vc); 1303 chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].medium += latency; 1304 latency = lo | (hi << 32); 1305 1306 lo = NVSWITCH_NPORT_PORTSTAT_RD32_LS10(device, idx_nport, _COUNT, _HIGH, _0, idx_vc); 1307 hi = NVSWITCH_NPORT_PORTSTAT_RD32_LS10(device, idx_nport, _COUNT, _HIGH, _1, idx_vc); 1308 chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].high += latency; 1309 latency = lo | (hi << 32); 1310 1311 lo = NVSWITCH_NPORT_PORTSTAT_RD32_LS10(device, idx_nport, _COUNT, _PANIC, _0, idx_vc); 1312 hi = NVSWITCH_NPORT_PORTSTAT_RD32_LS10(device, idx_nport, _COUNT, _PANIC, _1, idx_vc); 1313 chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].panic += latency; 1314 latency = lo | (hi << 32); 1315 1316 lo = NVSWITCH_NPORT_PORTSTAT_RD32_LS10(device, idx_nport, _PACKET, _COUNT, _0, idx_vc); 1317 hi = NVSWITCH_NPORT_PORTSTAT_RD32_LS10(device, idx_nport, _PACKET, _COUNT, _1, idx_vc); 1318 chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].count += latency; 1319 latency = lo | (hi << 32); 1320 1321 // Note the time of 
this snap 1322 chip_device->latency_stats->latency[idx_vc].last_read_time_nsec = time_nsec; 1323 chip_device->latency_stats->latency[idx_vc].count++; 1324 } 1325 1326 // Disable SNAPONDEMAND after fetching the latencies 1327 NVSWITCH_LINK_WR32_LS10(device, idx_nport, NPORT, _NPORT, _PORTSTAT_SNAP_CONTROL, 1328 DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) | 1329 DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE)); 1330 } 1331 } 1332 1333 static NvlStatus 1334 nvswitch_ctrl_set_ganged_link_table_ls10 1335 ( 1336 nvswitch_device *device, 1337 NVSWITCH_SET_GANGED_LINK_TABLE *p 1338 ) 1339 { 1340 return -NVL_ERR_NOT_SUPPORTED; 1341 } 1342 1343 void 1344 nvswitch_init_npg_multicast_ls10 1345 ( 1346 nvswitch_device *device 1347 ) 1348 { 1349 NVSWITCH_PRINT(device, WARN, "%s: Function not implemented\n", __FUNCTION__); 1350 } 1351 1352 void 1353 nvswitch_init_warm_reset_ls10 1354 ( 1355 nvswitch_device *device 1356 ) 1357 { 1358 NVSWITCH_PRINT(device, WARN, "%s: Function not implemented\n", __FUNCTION__); 1359 } 1360 1361 // 1362 // Helper funcction to query MINION to see if DL clocks are on 1363 // return NV_TRUE if the clocks are on 1364 // NV_FALSE if the clocks are off 1365 static 1366 NvBool 1367 _nvswitch_are_dl_clocks_on 1368 ( 1369 nvswitch_device *device, 1370 NvU32 linkNumber 1371 ) 1372 { 1373 NvU32 link_state; 1374 NvU32 stat_data; 1375 NvlStatus status = NVL_SUCCESS; 1376 nvlink_link * link= nvswitch_get_link(device, linkNumber); 1377 1378 if (link == NULL) 1379 { 1380 NVSWITCH_PRINT(device, ERROR, "%s: invalid link %d\n", 1381 __FUNCTION__, linkNumber); 1382 return NV_FALSE; 1383 } 1384 1385 status = nvswitch_minion_get_dl_status(device, linkNumber, 1386 NV_NVLSTAT_UC01, 0, &stat_data); 1387 if (status != NVL_SUCCESS) 1388 { 1389 return NV_FALSE; 1390 } 1391 1392 link_state = DRF_VAL(_NVLSTAT, _UC01, _LINK_STATE, stat_data); 1393 switch(link_state) 1394 { 1395 case LINKSTATUS_RESET: 1396 case LINKSTATUS_UNINIT: 1397 return 
NV_FALSE; 1398 case LINKSTATUS_LANESHUTDOWN: 1399 case LINKSTATUS_ACTIVE_PENDING: 1400 return nvswitch_are_link_clocks_on_ls10(device, link, 1401 NVSWITCH_PER_LINK_CLOCK_SET(RXCLK) | NVSWITCH_PER_LINK_CLOCK_SET(TXCLK)); 1402 } 1403 1404 return NV_TRUE; 1405 } 1406 1407 static NvlStatus 1408 _nvswitch_reset_and_drain_links_ls10 1409 ( 1410 nvswitch_device *device, 1411 NvU64 link_mask, 1412 NvBool bForced 1413 ) 1414 { 1415 NvlStatus status = NVL_SUCCESS; 1416 ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device); 1417 nvlink_link *link_info = NULL; 1418 NvU32 link; 1419 NvU32 data32; 1420 NvU32 retry_count = 3; 1421 NvU32 link_state_request; 1422 NvU32 link_state; 1423 NvU32 stat_data; 1424 NvU32 link_intr_subcode; 1425 NvBool bKeepPolling; 1426 NvBool bIsLinkInEmergencyShutdown; 1427 NvBool bAreDlClocksOn; 1428 NVSWITCH_TIMEOUT timeout; 1429 1430 if (link_mask == 0) 1431 { 1432 NVSWITCH_PRINT(device, ERROR, "%s: Invalid link_mask 0\n", 1433 __FUNCTION__); 1434 return -NVL_BAD_ARGS; 1435 } 1436 1437 // Check for inactive links 1438 FOR_EACH_INDEX_IN_MASK(64, link, link_mask) 1439 { 1440 if (!nvswitch_is_link_valid(device, link)) 1441 { 1442 NVSWITCH_PRINT(device, ERROR, "%s: link #%d invalid\n", 1443 __FUNCTION__, link); 1444 continue; 1445 } 1446 1447 if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NPORT, link)) 1448 { 1449 NVSWITCH_PRINT(device, ERROR, "%s: NPORT #%d invalid\n", 1450 __FUNCTION__, link); 1451 continue; 1452 } 1453 1454 if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLW, link)) 1455 { 1456 NVSWITCH_PRINT(device, ERROR, "%s: NVLW #%d invalid\n", 1457 __FUNCTION__, link); 1458 continue; 1459 } 1460 } 1461 FOR_EACH_INDEX_IN_MASK_END; 1462 1463 FOR_EACH_INDEX_IN_MASK(64, link, link_mask) 1464 { 1465 link_info = nvswitch_get_link(device, link); 1466 if (link_info == NULL) 1467 { 1468 NVSWITCH_PRINT(device, ERROR, "%s: invalid link %d\n", 1469 __FUNCTION__, link); 1470 continue; 1471 } 1472 1473 if (nvswitch_is_link_in_reset(device, link_info)) 
1474 { 1475 continue; 1476 } 1477 1478 // Unregister links to make them unusable while reset is in progress. 1479 nvlink_lib_unregister_link(link_info); 1480 1481 // 1482 // Step 1.0 : Check NXBAR error state. NXBAR errors are always fatal 1483 // errors and are assumed to require a full power-on reset to recover. 1484 // No incremental recovery is possible after a NXBAR error. 1485 // 1486 data32 = NVSWITCH_NPORT_RD32_LS10(device, link, _EGRESS, _ERR_STATUS_0); 1487 if (FLD_TEST_DRF(_EGRESS, _ERR_STATUS_0, _EGRESSBUFERR, _CLEAR, data32) || 1488 FLD_TEST_DRF(_EGRESS, _ERR_STATUS_0, _SEQIDERR, _CLEAR, data32) || 1489 FLD_TEST_DRF(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_LIMIT_ERR, _CLEAR, data32) || 1490 FLD_TEST_DRF(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_DBE_ERR, _CLEAR, data32) || 1491 FLD_TEST_DRF(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_PARITY_ERR, _CLEAR, data32)) 1492 { 1493 NVSWITCH_PRINT(device, ERROR, 1494 "%s: Fatal NXBAR error on link %d. Chip reset required\n", 1495 __FUNCTION__, link); 1496 1497 // Re-register links. 1498 status = nvlink_lib_register_link(device->nvlink_device, link_info); 1499 if (status != NVL_SUCCESS) 1500 { 1501 nvswitch_destroy_link(link_info); 1502 } 1503 1504 continue; 1505 } 1506 1507 // 1508 // Step 2.0 : Disable NPG & NVLW interrupts 1509 // 1510 nvswitch_link_disable_interrupts_ls10(device, link); 1511 1512 // 1513 // Step 3.0 : 1514 // Prior to starting port reset, ensure the links is in emergency shutdown 1515 // 1516 // Forcibly shutdown links if requested 1517 // 1518 if (bForced) 1519 { 1520 nvswitch_execute_unilateral_link_shutdown_ls10(link_info); 1521 } 1522 else 1523 { 1524 bIsLinkInEmergencyShutdown = NV_FALSE; 1525 nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); 1526 do 1527 { 1528 bKeepPolling = (nvswitch_timeout_check(&timeout)) ? 
NV_FALSE : NV_TRUE; 1529 1530 status = nvswitch_minion_get_dl_status(device, link_info->linkNumber, 1531 NV_NVLSTAT_UC01, 0, &stat_data); 1532 1533 if (status != NVL_SUCCESS) 1534 { 1535 continue; 1536 } 1537 1538 link_state = DRF_VAL(_NVLSTAT, _UC01, _LINK_STATE, stat_data); 1539 1540 bIsLinkInEmergencyShutdown = (link_state == LINKSTATUS_EMERGENCY_SHUTDOWN) ? 1541 NV_TRUE:NV_FALSE; 1542 1543 if (bIsLinkInEmergencyShutdown == NV_TRUE) 1544 { 1545 break; 1546 } 1547 } 1548 while(bKeepPolling); 1549 1550 if (bIsLinkInEmergencyShutdown == NV_FALSE) 1551 { 1552 NVSWITCH_PRINT(device, ERROR, 1553 "%s: link %d failed to enter emergency shutdown\n", 1554 __FUNCTION__, link); 1555 1556 // Re-register links. 1557 status = nvlink_lib_register_link(device->nvlink_device, link_info); 1558 if (status != NVL_SUCCESS) 1559 { 1560 nvswitch_destroy_link(link_info); 1561 } 1562 continue; 1563 } 1564 } 1565 1566 nvswitch_corelib_clear_link_state_ls10(link_info); 1567 1568 // 1569 // Step 4.0 : Send command to SOE to perform the following steps : 1570 // - Backup NPORT state before reset 1571 // - Set the INGRESS_STOP bit of CTRL_STOP (0x48) 1572 // - Assert debug_clear for the given port NPORT by writing to the 1573 // DEBUG_CLEAR (0x144) register 1574 // - Assert NPortWarmReset[i] using the WARMRESET (0x140) register 1575 // 1576 nvswitch_soe_issue_nport_reset_ls10(device, link); 1577 1578 // 1579 // Step 5.0 : Issue Minion request to perform the link reset sequence 1580 // We retry the Minion reset sequence 3 times, if we there is an error 1581 // while trying to reset the first few times. Refer Bug 3799577 for 1582 // more details. 
1583 // 1584 do 1585 { 1586 status = nvswitch_request_tl_link_state_ls10(link_info, 1587 NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET, NV_TRUE); 1588 1589 if (status == NVL_SUCCESS) 1590 { 1591 break; 1592 } 1593 else 1594 { 1595 1596 link_state_request = NVSWITCH_LINK_RD32_LS10(device, link_info->linkNumber, 1597 NVLIPT_LNK , _NVLIPT_LNK , _CTRL_LINK_STATE_REQUEST); 1598 1599 link_state = DRF_VAL(_NVLIPT_LNK, _CTRL_LINK_STATE_REQUEST, _STATUS, 1600 link_state_request); 1601 1602 if (nvswitch_minion_get_dl_status(device, link_info->linkNumber, 1603 NV_NVLSTAT_MN00, 0, &stat_data) == NVL_SUCCESS) 1604 { 1605 link_intr_subcode = DRF_VAL(_NVLSTAT, _MN00, _LINK_INTR_SUBCODE, stat_data); 1606 1607 if ((link_state == NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_MINION_REQUEST_FAIL) && 1608 (link_intr_subcode == MINION_ALARM_BUSY)) 1609 { 1610 1611 // 1612 // We retry the reset sequence when we see a MINION_REQUEST_FAIL 1613 // or MINION_ALARM_BUSY 1614 // 1615 } 1616 else 1617 { 1618 break; 1619 } 1620 } 1621 1622 retry_count--; 1623 } 1624 1625 } while(retry_count); 1626 1627 if (status != NVL_SUCCESS) 1628 { 1629 NVSWITCH_PRINT(device, ERROR, 1630 "%s: NvLink Reset has failed for link %d\n", 1631 __FUNCTION__, link); 1632 1633 // Re-register links. 
1634 status = nvlink_lib_register_link(device->nvlink_device, link_info); 1635 if (status != NVL_SUCCESS) 1636 { 1637 nvswitch_destroy_link(link_info); 1638 } 1639 continue; 1640 } 1641 1642 // 1643 // Step 6.0 : Send command to SOE to perform the following steps : 1644 // - Clear the INGRESS_STOP bit of CTRL_STOP (0x48) 1645 // - Clear the CONTAIN_AND_DRAIN (0x5c) status 1646 // - Assert NPORT INITIALIZATION and program the state tracking RAMS 1647 // - Restore NPORT state after reset 1648 // 1649 nvswitch_soe_restore_nport_state_ls10(device, link); 1650 1651 // Step 7.0 : Re-program the routing table for DBEs 1652 1653 // Step 8.0 : Reset NVLW and NPORT interrupt state 1654 _nvswitch_link_reset_interrupts_ls10(device, link); 1655 1656 // Re-register links. 1657 status = nvlink_lib_register_link(device->nvlink_device, link_info); 1658 if (status != NVL_SUCCESS) 1659 { 1660 NVSWITCH_PRINT(device, ERROR, 1661 "%s: Failed to register link: 0x%x with the corelib\n", 1662 __FUNCTION__, link); 1663 nvswitch_destroy_link(link_info); 1664 continue; 1665 } 1666 1667 // Initialize select scratch registers to 0x0 1668 device->hal.nvswitch_init_scratch(device); 1669 1670 // 1671 // Step 9.0: Launch ALI training to re-initialize and train the links 1672 // nvswitch_launch_ALI_link_training(device, link_info); 1673 // 1674 // Request active, but don't block. 
FM will come back and check 1675 // active link status by blocking on this TLREQ's completion 1676 // 1677 // CCI will re-train links 1678 if (!cciIsLinkManaged(device, link)) 1679 { 1680 status = nvswitch_request_tl_link_state_ls10(link_info, 1681 NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_ACTIVE, 1682 NV_FALSE); 1683 1684 if (status != NVL_SUCCESS) 1685 { 1686 NVSWITCH_PRINT(device, ERROR, 1687 "%s: TL link state request to active for ALI failed for link: 0x%x\n", 1688 __FUNCTION__, link); 1689 continue; 1690 } 1691 } 1692 1693 bAreDlClocksOn = NV_FALSE; 1694 nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); 1695 do 1696 { 1697 bKeepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; 1698 bAreDlClocksOn = _nvswitch_are_dl_clocks_on(device, link); 1699 1700 if (bAreDlClocksOn) 1701 { 1702 break; 1703 } 1704 } 1705 while(bKeepPolling); 1706 1707 if (!bAreDlClocksOn) 1708 { 1709 NVSWITCH_PRINT(device, ERROR, 1710 "%s: link: 0x%x doesn't have the TX/RX clocks on, skipping setting DL interrupts!\n", 1711 __FUNCTION__, link); 1712 continue; 1713 } 1714 1715 nvswitch_set_dlpl_interrupts_ls10(link_info); 1716 } 1717 FOR_EACH_INDEX_IN_MASK_END; 1718 1719 chip_device->deferredLinkErrors[link].state.lastRetrainTime = nvswitch_os_get_platform_time(); 1720 1721 return NVL_SUCCESS; 1722 } 1723 1724 // 1725 // Implement reset and drain sequence for ls10 1726 // 1727 NvlStatus 1728 nvswitch_reset_and_drain_links_ls10 1729 ( 1730 nvswitch_device *device, 1731 NvU64 link_mask, 1732 NvBool bForced 1733 ) 1734 { 1735 NvlStatus status = NVL_SUCCESS; 1736 1737 NvU32 link; 1738 1739 // CCI will call reset and drain separately 1740 FOR_EACH_INDEX_IN_MASK(64, link, link_mask) 1741 { 1742 if (cciIsLinkManaged(device, link)) 1743 { 1744 link_mask = link_mask & ~NVBIT64(link); 1745 } 1746 } 1747 FOR_EACH_INDEX_IN_MASK_END; 1748 1749 status = _nvswitch_reset_and_drain_links_ls10(device, link_mask, bForced); 1750 if (status != NVL_SUCCESS) 1751 { 1752 return 
status; 1753 } 1754 1755 return NVL_SUCCESS; 1756 } 1757 1758 NvlStatus 1759 nvswitch_set_nport_port_config_ls10 1760 ( 1761 nvswitch_device *device, 1762 NVSWITCH_SET_SWITCH_PORT_CONFIG *p 1763 ) 1764 { 1765 NvU32 val; 1766 NvlStatus status = NVL_SUCCESS; 1767 1768 if (p->requesterLinkID >= NVBIT( 1769 DRF_SIZE(NV_NPORT_REQLINKID_REQROUTINGID) + 1770 DRF_SIZE(NV_NPORT_REQLINKID_REQROUTINGID_UPPER))) 1771 { 1772 NVSWITCH_PRINT(device, ERROR, 1773 "%s: Invalid requester RID 0x%x\n", 1774 __FUNCTION__, p->requesterLinkID); 1775 return -NVL_BAD_ARGS; 1776 } 1777 1778 if (p->requesterLanID > DRF_MASK(NV_NPORT_REQLINKID_REQROUTINGLAN)) 1779 { 1780 NVSWITCH_PRINT(device, ERROR, 1781 "%s: Invalid requester RLAN 0x%x\n", 1782 __FUNCTION__, p->requesterLanID); 1783 return -NVL_BAD_ARGS; 1784 } 1785 1786 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _NPORT, _CTRL); 1787 switch (p->type) 1788 { 1789 case CONNECT_ACCESS_GPU: 1790 case CONNECT_ACCESS_CPU: 1791 case CONNECT_ACCESS_SWITCH: 1792 val = FLD_SET_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _ACCESSLINK, val); 1793 break; 1794 case CONNECT_TRUNK_SWITCH: 1795 val = FLD_SET_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _TRUNKLINK, val); 1796 break; 1797 default: 1798 NVSWITCH_PRINT(device, ERROR, 1799 "%s: invalid type #%d\n", 1800 __FUNCTION__, p->type); 1801 return -NVL_BAD_ARGS; 1802 } 1803 1804 // _ENDPOINT_COUNT deprecated on LS10 1805 1806 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _CTRL, val); 1807 1808 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _REQLINKID, 1809 DRF_NUM(_NPORT, _REQLINKID, _REQROUTINGID, p->requesterLinkID) | 1810 DRF_NUM(_NPORT, _REQLINKID, _REQROUTINGID_UPPER, 1811 p->requesterLinkID >> DRF_SIZE(NV_NPORT_REQLINKID_REQROUTINGID)) | 1812 DRF_NUM(_NPORT, _REQLINKID, _REQROUTINGLAN, p->requesterLanID)); 1813 1814 if (p->type == CONNECT_TRUNK_SWITCH) 1815 { 1816 if (!nvswitch_is_soe_supported(device)) 1817 { 1818 // Set trunk specific settings (TPROD) on PRE-SILION 1819 1820 // NPORT 1821 val = 
NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _NPORT, _CTRL); 1822 val = FLD_SET_DRF(_NPORT, _CTRL, _EGDRAINENB, _DISABLE, val); 1823 val = FLD_SET_DRF(_NPORT, _CTRL, _ENEGRESSDBI, _ENABLE, val); 1824 val = FLD_SET_DRF(_NPORT, _CTRL, _ENROUTEDBI, _ENABLE, val); 1825 val = FLD_SET_DRF(_NPORT, _CTRL, _RTDRAINENB, _DISABLE, val); 1826 val = FLD_SET_DRF(_NPORT, _CTRL, _SPARE, _INIT, val); 1827 val = FLD_SET_DRF(_NPORT, _CTRL, _TRUNKLINKENB, __TPROD, val); 1828 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _CTRL, val); 1829 1830 // EGRESS 1831 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _EGRESS, _CTRL); 1832 val = FLD_SET_DRF(_EGRESS, _CTRL, _CTO_ENB, __TPROD, val); 1833 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _EGRESS, _CTRL, val); 1834 1835 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _EGRESS, _ERR_CONTAIN_EN_0); 1836 val = FLD_SET_DRF(_EGRESS, _ERR_CONTAIN_EN_0, _CREDIT_TIME_OUT_ERR, __TPROD, val); 1837 val = FLD_SET_DRF(_EGRESS, _ERR_CONTAIN_EN_0, _HWRSPERR, __TPROD, val); 1838 val = FLD_SET_DRF(_EGRESS, _ERR_CONTAIN_EN_0, _INVALIDVCSET_ERR, __TPROD, val); 1839 val = FLD_SET_DRF(_EGRESS, _ERR_CONTAIN_EN_0, _REQTGTIDMISMATCHERR, __TPROD, val); 1840 val = FLD_SET_DRF(_EGRESS, _ERR_CONTAIN_EN_0, _RSPREQIDMISMATCHERR, __TPROD, val); 1841 val = FLD_SET_DRF(_EGRESS, _ERR_CONTAIN_EN_0, _URRSPERR, __TPROD, val); 1842 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _EGRESS, _ERR_CONTAIN_EN_0, val); 1843 1844 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _EGRESS, _ERR_FATAL_REPORT_EN_0); 1845 val = FLD_SET_DRF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _CREDIT_TIME_OUT_ERR, __TPROD, val); 1846 val = FLD_SET_DRF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _HWRSPERR, __TPROD, val); 1847 val = FLD_SET_DRF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _INVALIDVCSET_ERR, __TPROD, val); 1848 val = FLD_SET_DRF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _REQTGTIDMISMATCHERR, __TPROD, val); 1849 val = FLD_SET_DRF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _RSPREQIDMISMATCHERR, __TPROD, val); 1850 val = 
FLD_SET_DRF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _URRSPERR, __TPROD, val); 1851 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _EGRESS, _ERR_FATAL_REPORT_EN_0, val); 1852 1853 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _EGRESS, _ERR_LOG_EN_0); 1854 val = FLD_SET_DRF(_EGRESS, _ERR_LOG_EN_0, _CREDIT_TIME_OUT_ERR, __TPROD, val); 1855 val = FLD_SET_DRF(_EGRESS, _ERR_LOG_EN_0, _HWRSPERR, __TPROD, val); 1856 val = FLD_SET_DRF(_EGRESS, _ERR_LOG_EN_0, _INVALIDVCSET_ERR, __TPROD, val); 1857 val = FLD_SET_DRF(_EGRESS, _ERR_LOG_EN_0, _REQTGTIDMISMATCHERR, __TPROD, val); 1858 val = FLD_SET_DRF(_EGRESS, _ERR_LOG_EN_0, _RSPREQIDMISMATCHERR, __TPROD, val); 1859 val = FLD_SET_DRF(_EGRESS, _ERR_LOG_EN_0, _URRSPERR, __TPROD, val); 1860 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _EGRESS, _ERR_LOG_EN_0, val); 1861 1862 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0); 1863 val = FLD_SET_DRF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _PRIVRSPERR, __TPROD, val); 1864 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0, val); 1865 1866 // INGRESS 1867 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _INGRESS, _ERR_CONTAIN_EN_0); 1868 val = FLD_SET_DRF(_INGRESS, _ERR_CONTAIN_EN_0, _EXTAREMAPTAB_ECC_DBE_ERR, __TPROD, val); 1869 val = FLD_SET_DRF(_INGRESS, _ERR_CONTAIN_EN_0, _EXTBREMAPTAB_ECC_DBE_ERR, __TPROD, val); 1870 val = FLD_SET_DRF(_INGRESS, _ERR_CONTAIN_EN_0, _INVALIDVCSET, __TPROD, val); 1871 val = FLD_SET_DRF(_INGRESS, _ERR_CONTAIN_EN_0, _MCREMAPTAB_ECC_DBE_ERR, __TPROD, val); 1872 val = FLD_SET_DRF(_INGRESS, _ERR_CONTAIN_EN_0, _REMAPTAB_ECC_DBE_ERR, __TPROD, val); 1873 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _INGRESS, _ERR_CONTAIN_EN_0, val); 1874 1875 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _INGRESS, _ERR_FATAL_REPORT_EN_0); 1876 val = FLD_SET_DRF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _EXTAREMAPTAB_ECC_DBE_ERR, __TPROD, val); 1877 val = FLD_SET_DRF(_INGRESS, _ERR_FATAL_REPORT_EN_0, 
_EXTBREMAPTAB_ECC_DBE_ERR, __TPROD, val); 1878 val = FLD_SET_DRF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _INVALIDVCSET, __TPROD, val); 1879 val = FLD_SET_DRF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _MCREMAPTAB_ECC_DBE_ERR, __TPROD, val); 1880 val = FLD_SET_DRF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _REMAPTAB_ECC_DBE_ERR, __TPROD, val); 1881 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _INGRESS, _ERR_FATAL_REPORT_EN_0, val); 1882 1883 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _INGRESS, _ERR_LOG_EN_0); 1884 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_0, _EXTAREMAPTAB_ECC_DBE_ERR, __TPROD, val); 1885 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_0, _EXTBREMAPTAB_ECC_DBE_ERR, __TPROD, val); 1886 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_0, _INVALIDVCSET, __TPROD, val); 1887 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_0, _MCREMAPTAB_ECC_DBE_ERR, __TPROD, val); 1888 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_0, _REMAPTAB_ECC_DBE_ERR, __TPROD, val); 1889 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _INGRESS, _ERR_LOG_EN_0, val); 1890 1891 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _INGRESS, _ERR_LOG_EN_1); 1892 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_1, _EXTAREMAPTAB_ADDRTYPEERR, __TPROD, val); 1893 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_1, _EXTAREMAPTAB_ECC_LIMIT_ERR, __TPROD, val); 1894 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_1, _EXTBREMAPTAB_ADDRTYPEERR, __TPROD, val); 1895 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_1, _EXTBREMAPTAB_ECC_LIMIT_ERR, __TPROD, val); 1896 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_1, _MCCMDTOUCADDRERR, __TPROD, val); 1897 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_1, _MCREMAPTAB_ADDRTYPEERR, __TPROD, val); 1898 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_1, _MCREMAPTAB_ECC_LIMIT_ERR, __TPROD, val); 1899 val = FLD_SET_DRF(_INGRESS, _ERR_LOG_EN_1, _READMCREFLECTMEMERR, __TPROD, val); 1900 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _INGRESS, _ERR_LOG_EN_1, val); 1901 1902 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0); 1903 
val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _ACLFAIL, __TPROD, val); 1904 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _ADDRBOUNDSERR, __TPROD, val); 1905 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _ADDRTYPEERR, __TPROD, val); 1906 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTAREMAPTAB_ACLFAIL, __TPROD, val); 1907 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTAREMAPTAB_ADDRBOUNDSERR, __TPROD, val); 1908 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTAREMAPTAB_INDEX_ERR, __TPROD, val); 1909 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTAREMAPTAB_REQCONTEXTMISMATCHERR, __TPROD, val); 1910 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTBREMAPTAB_ACLFAIL, __TPROD, val); 1911 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTBREMAPTAB_ADDRBOUNDSERR, __TPROD, val); 1912 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTBREMAPTAB_INDEX_ERR, __TPROD, val); 1913 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EXTBREMAPTAB_REQCONTEXTMISMATCHERR, __TPROD, val); 1914 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _MCREMAPTAB_ACLFAIL, __TPROD, val); 1915 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _MCREMAPTAB_ADDRBOUNDSERR, __TPROD, val); 1916 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _MCREMAPTAB_INDEX_ERR, __TPROD, val); 1917 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _MCREMAPTAB_REQCONTEXTMISMATCHERR, __TPROD, val); 1918 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _REMAPTAB_ECC_LIMIT_ERR, __TPROD, val); 1919 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _REQCONTEXTMISMATCHERR, __TPROD, val); 1920 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0, val); 1921 1922 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _INGRESS, _ERR_NON_FATAL_REPORT_EN_1); 1923 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _EXTAREMAPTAB_ADDRTYPEERR, 
__TPROD, val); 1924 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _EXTAREMAPTAB_ECC_LIMIT_ERR, __TPROD, val); 1925 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _EXTBREMAPTAB_ADDRTYPEERR, __TPROD, val); 1926 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _EXTBREMAPTAB_ECC_LIMIT_ERR, __TPROD, val); 1927 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCCMDTOUCADDRERR, __TPROD, val); 1928 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCREMAPTAB_ADDRTYPEERR, __TPROD, val); 1929 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _MCREMAPTAB_ECC_LIMIT_ERR, __TPROD, val); 1930 val = FLD_SET_DRF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_1, _READMCREFLECTMEMERR, __TPROD, val); 1931 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _INGRESS, _ERR_NON_FATAL_REPORT_EN_1, val); 1932 1933 // SOURCETRACK 1934 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _SOURCETRACK, _ERR_CONTAIN_EN_0); 1935 val = FLD_SET_DRF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, __TPROD, val); 1936 val = FLD_SET_DRF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _DUP_CREQ_TCEN0_TAG_ERR, __TPROD, val); 1937 val = FLD_SET_DRF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _INVALID_TCEN0_RSP_ERR, __TPROD, val); 1938 val = FLD_SET_DRF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _INVALID_TCEN1_RSP_ERR, __TPROD, val); 1939 val = FLD_SET_DRF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _SOURCETRACK_TIME_OUT_ERR, __TPROD, val); 1940 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _SOURCETRACK, _ERR_CONTAIN_EN_0, val); 1941 1942 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _SOURCETRACK, _ERR_FATAL_REPORT_EN_0); 1943 val = FLD_SET_DRF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, __TPROD, val); 1944 val = FLD_SET_DRF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _DUP_CREQ_TCEN0_TAG_ERR, __TPROD, val); 1945 val = FLD_SET_DRF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _INVALID_TCEN0_RSP_ERR, __TPROD, val); 1946 val = FLD_SET_DRF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, 
_INVALID_TCEN1_RSP_ERR, __TPROD, val); 1947 val = FLD_SET_DRF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _SOURCETRACK_TIME_OUT_ERR, __TPROD, val); 1948 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _SOURCETRACK, _ERR_FATAL_REPORT_EN_0, val); 1949 1950 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _SOURCETRACK, _ERR_LOG_EN_0); 1951 val = FLD_SET_DRF(_SOURCETRACK, _ERR_LOG_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, __TPROD, val); 1952 val = FLD_SET_DRF(_SOURCETRACK, _ERR_LOG_EN_0, _DUP_CREQ_TCEN0_TAG_ERR, __TPROD, val); 1953 val = FLD_SET_DRF(_SOURCETRACK, _ERR_LOG_EN_0, _INVALID_TCEN0_RSP_ERR, __TPROD, val); 1954 val = FLD_SET_DRF(_SOURCETRACK, _ERR_LOG_EN_0, _INVALID_TCEN1_RSP_ERR, __TPROD, val); 1955 val = FLD_SET_DRF(_SOURCETRACK, _ERR_LOG_EN_0, _SOURCETRACK_TIME_OUT_ERR, __TPROD, val); 1956 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _SOURCETRACK, _ERR_LOG_EN_0, val); 1957 1958 val = NVSWITCH_LINK_RD32(device, p->portNum, NPORT, _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0); 1959 val = FLD_SET_DRF(_SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, __TPROD, val); 1960 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, val); 1961 } 1962 else 1963 { 1964 // Set trunk specific settings (TPROD) in SOE 1965 status = nvswitch_set_nport_tprod_state_ls10(device, p->portNum); 1966 if (status != NVL_SUCCESS) 1967 { 1968 NVSWITCH_PRINT(device, ERROR, 1969 "%s: Failed to set NPORT TPROD state\n", 1970 __FUNCTION__); 1971 } 1972 } 1973 } 1974 else 1975 { 1976 // PROD setting assumes ACCESS link 1977 } 1978 1979 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _SRC_PORT_TYPE0, NvU64_LO32(p->trunkSrcMask)); 1980 NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _SRC_PORT_TYPE1, NvU64_HI32(p->trunkSrcMask)); 1981 1982 return status; 1983 } 1984 1985 /* 1986 * @brief Returns the ingress requester link id. 
 *
 * @param[in] device    nvswitch device
 * @param[in] params    NVSWITCH_GET_INGRESS_REQLINKID_PARAMS
 *
 * @returns             NVL_SUCCESS if action succeeded,
 *                      -NVL_BAD_ARGS if the NPORT engine for the port is invalid
 */
NvlStatus
nvswitch_ctrl_get_ingress_reqlinkid_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_INGRESS_REQLINKID_PARAMS *params
)
{
    NvU32 regval;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NPORT, params->portNum))
    {
        return -NVL_BAD_ARGS;
    }

    // Requester link ID is split across two fields; stitch UPPER above the base field.
    regval = NVSWITCH_NPORT_RD32_LS10(device, params->portNum, _NPORT, _REQLINKID);
    params->requesterLinkID = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGID, regval) |
        (DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGID_UPPER, regval) <<
            DRF_SIZE(NV_NPORT_REQLINKID_REQROUTINGID));

    return NVL_SUCCESS;
}

/*
 * @brief Snapshot and reset accumulated egress latency statistics for one VC.
 *
 * @param[in]     device    nvswitch device
 * @param[in,out] pLatency  in: vc_selector to query; out: per-NPORT histograms
 *                          and elapsed time since the previous snapshot
 *
 * @returns NVL_SUCCESS, or -NVL_BAD_ARGS for an out-of-range VC selector
 */
static NvlStatus
nvswitch_ctrl_get_internal_latency_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_INTERNAL_LATENCY *pLatency
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NvU32 vc_selector = pLatency->vc_selector;
    NvU32 idx_nport;

    // Validate VC selector
    if (vc_selector >= NVSWITCH_NUM_VCS_LS10)
    {
        return -NVL_BAD_ARGS;
    }

    // vc_selector is saved above because the memset clears the whole struct.
    nvswitch_os_memset(pLatency, 0, sizeof(*pLatency));
    pLatency->vc_selector = vc_selector;

    // Snap up-to-the moment stats
    nvswitch_internal_latency_bin_log(device);

    for (idx_nport=0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++)
    {
        // Ports without a valid NPORT leave zeroed histogram entries.
        if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NPORT, idx_nport))
        {
            continue;
        }

        pLatency->egressHistogram[idx_nport].low =
            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].low;
        pLatency->egressHistogram[idx_nport].medium =
            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].medium;
        pLatency->egressHistogram[idx_nport].high =
            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].high;
        pLatency->egressHistogram[idx_nport].panic =
            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].panic;
        pLatency->egressHistogram[idx_nport].count =
            chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].count;
    }

    // Elapsed time between the previous snapshot and this one, ns -> ms.
    pLatency->elapsed_time_msec =
        (chip_device->latency_stats->latency[vc_selector].last_read_time_nsec -
            chip_device->latency_stats->latency[vc_selector].start_time_nsec)/1000000ULL;

    // Restart the accumulation window at the time of this snapshot.
    chip_device->latency_stats->latency[vc_selector].start_time_nsec =
        chip_device->latency_stats->latency[vc_selector].last_read_time_nsec;

    chip_device->latency_stats->latency[vc_selector].count = 0;

    // Clear accum_latency[]
    for (idx_nport = 0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++)
    {
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].low = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].medium = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].high = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].panic = 0;
        chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].count = 0;
    }

    return NVL_SUCCESS;
}

/*
 * @brief Validate and program latency-bin thresholds for every VC.
 *
 * Thresholds arrive in nanoseconds, are range-checked against
 * [min_threshold, max_threshold] and required to be monotonic
 * (low <= medium <= high), converted in place to switch-clock cycle
 * counts, then broadcast to the PORTSTAT limit registers.
 *
 * @param[in]     device    nvswitch device
 * @param[in,out] pLatency  thresholds in ns on entry; converted to cycle
 *                          counts as a side effect of programming
 *
 * @returns NVL_SUCCESS, or -NVL_BAD_ARGS if any threshold is out of range
 */
NvlStatus
nvswitch_ctrl_set_latency_bins_ls10
(
    nvswitch_device *device,
    NVSWITCH_SET_LATENCY_BINS *pLatency
)
{
    NvU32 vc_selector;
    const NvU32 freq_mhz = 1330;
    const NvU32 switchpll_hz = freq_mhz * 1000000ULL; // TODO: Verify this against POR clocks
    const NvU32 min_threshold = 10;    // Must be > zero to avoid div by zero
    const NvU32 max_threshold = 10000;

    // Quick input validation and ns to register value conversion
    for (vc_selector = 0; vc_selector < NVSWITCH_NUM_VCS_LS10; vc_selector++)
    {
        if ((pLatency->bin[vc_selector].lowThreshold > max_threshold) ||
            (pLatency->bin[vc_selector].lowThreshold < min_threshold) ||
            (pLatency->bin[vc_selector].medThreshold > max_threshold) ||
            (pLatency->bin[vc_selector].medThreshold < min_threshold) ||
            (pLatency->bin[vc_selector].hiThreshold  > max_threshold) ||
            (pLatency->bin[vc_selector].hiThreshold  < min_threshold) ||
            (pLatency->bin[vc_selector].lowThreshold > pLatency->bin[vc_selector].medThreshold) ||
            (pLatency->bin[vc_selector].medThreshold > pLatency->bin[vc_selector].hiThreshold))
        {
            return -NVL_BAD_ARGS;
        }

        // ns -> clock cycles: threshold_cycles = switchpll_hz / (1e9 / threshold_ns).
        // min_threshold >= 10 guarantees the inner divisor is nonzero.
        pLatency->bin[vc_selector].lowThreshold =
            switchpll_hz / (1000000000 / pLatency->bin[vc_selector].lowThreshold);
        pLatency->bin[vc_selector].medThreshold =
            switchpll_hz / (1000000000 / pLatency->bin[vc_selector].medThreshold);
        pLatency->bin[vc_selector].hiThreshold =
            switchpll_hz / (1000000000 / pLatency->bin[vc_selector].hiThreshold);

        NVSWITCH_PORTSTAT_BCAST_WR32_LS10(device, _LIMIT, _LOW, vc_selector, pLatency->bin[vc_selector].lowThreshold);
        NVSWITCH_PORTSTAT_BCAST_WR32_LS10(device, _LIMIT, _MEDIUM, vc_selector, pLatency->bin[vc_selector].medThreshold);
        NVSWITCH_PORTSTAT_BCAST_WR32_LS10(device, _LIMIT, _HIGH, vc_selector, pLatency->bin[vc_selector].hiThreshold);
    }

    return NVL_SUCCESS;
}

//
// MODS-only IOCTLS
//

/*
 * REGISTER_READ/_WRITE
 * Provides direct access to the MMIO space for trusted clients like MODS.
 * This API should not be exposed to unsecure clients.
 */

/*
 * _nvswitch_get_engine_base
 * Used by REGISTER_READ/WRITE API. Looks up an engine based on device/instance
 * and returns the base address in BAR0.
 *
 * register_rw_engine   [in] REGISTER_RW_ENGINE_*
 * instance             [in] physical instance of device
 * bcast                [in] FALSE: find unicast base address
 *                           TRUE:  find broadcast base address
 * base_addr            [out] base address in BAR0 of requested device
 *
 * Returns              NVL_SUCCESS: Device base address successfully found
 *                      else device lookup failed
 */

static NvlStatus
_nvswitch_get_engine_base_ls10
(
    nvswitch_device *device,
    NvU32 register_rw_engine,       // REGISTER_RW_ENGINE_*
    NvU32 instance,                 // device instance
    NvBool bcast,
    NvU32 *base_addr
)
{
    NvU32 base = 0;
    ENGINE_DISCOVERY_TYPE_LS10 *engine = NULL;
    NvlStatus retval = NVL_SUCCESS;
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);

    // Find the engine descriptor matching the request
    engine = NULL;

    switch (register_rw_engine)
    {
        case REGISTER_RW_ENGINE_RAW:
            // Special case raw IO: base stays 0 and the client supplies the
            // full BAR0 offset. Only unicast instance 0 is meaningful.
            if ((instance != 0) ||
                (bcast != NV_FALSE))
            {
                retval = -NVL_BAD_ARGS;
            }
        break;

        case REGISTER_RW_ENGINE_FUSE:
        case REGISTER_RW_ENGINE_JTAG:
        case REGISTER_RW_ENGINE_PMGR:
            //
            // Legacy devices are always single-instance, unicast-only.
            // These manuals are BAR0 offset-based, not IP-based. Treat them
            // the same as RAW.
            //
            if ((instance != 0) ||
                (bcast != NV_FALSE))
            {
                retval = -NVL_BAD_ARGS;
            }
            register_rw_engine = REGISTER_RW_ENGINE_RAW;
        break;

        //
        // The cases below follow two templates:
        //  - unicast-only engines reject bcast outright;
        //  - bcast-capable engines select the *_BCAST descriptor array when
        //    bcast is requested, the unicast array otherwise.
        // In either path, an invalid engine/instance leaves engine == NULL,
        // which is mapped to -NVL_BAD_ARGS after the switch.
        //
        case REGISTER_RW_ENGINE_SAW:
            if (bcast)
            {
                retval = -NVL_BAD_ARGS;
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, SAW, instance))
                {
                    engine = &chip_device->engSAW[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_SOE:
            if (bcast)
            {
                retval = -NVL_BAD_ARGS;
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, SOE, instance))
                {
                    engine = &chip_device->engSOE[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_SE:
            if (bcast)
            {
                retval = -NVL_BAD_ARGS;
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, SE, instance))
                {
                    engine = &chip_device->engSE[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_CLKS_SYS:
            if (bcast)
            {
                retval = -NVL_BAD_ARGS;
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, CLKS_SYS, instance))
                {
                    engine = &chip_device->engCLKS_SYS[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_CLKS_SYSB:
            if (bcast)
            {
                retval = -NVL_BAD_ARGS;
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, CLKS_SYSB, instance))
                {
                    engine = &chip_device->engCLKS_SYSB[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_CLKS_P0:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, CLKS_P0_BCAST, instance))
                {
                    engine = &chip_device->engCLKS_P0_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, CLKS_P0, instance))
                {
                    engine = &chip_device->engCLKS_P0[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_XPL:
            if (bcast)
            {
                retval = -NVL_BAD_ARGS;
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, XPL, instance))
                {
                    engine = &chip_device->engXPL[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_XTL:
            if (bcast)
            {
                retval = -NVL_BAD_ARGS;
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, XTL, instance))
                {
                    engine = &chip_device->engXTL[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NVLW:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLW_BCAST, instance))
                {
                    engine = &chip_device->engNVLW_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLW, instance))
                {
                    engine = &chip_device->engNVLW[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_MINION:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, MINION_BCAST, instance))
                {
                    engine = &chip_device->engMINION_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, MINION, instance))
                {
                    engine = &chip_device->engMINION[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NVLIPT:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLIPT_BCAST, instance))
                {
                    engine = &chip_device->engNVLIPT_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLIPT, instance))
                {
                    engine = &chip_device->engNVLIPT[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NVLTLC:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLTLC_BCAST, instance))
                {
                    engine = &chip_device->engNVLTLC_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLTLC, instance))
                {
                    engine = &chip_device->engNVLTLC[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NVLTLC_MULTICAST:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLTLC_MULTICAST_BCAST, instance))
                {
                    engine = &chip_device->engNVLTLC_MULTICAST_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLTLC_MULTICAST, instance))
                {
                    engine = &chip_device->engNVLTLC_MULTICAST[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NPG:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NPG_BCAST, instance))
                {
                    engine = &chip_device->engNPG_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NPG, instance))
                {
                    engine = &chip_device->engNPG[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NPORT:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NPORT_BCAST, instance))
                {
                    engine = &chip_device->engNPORT_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NPORT, instance))
                {
                    engine = &chip_device->engNPORT[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NPORT_MULTICAST:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NPORT_MULTICAST_BCAST, instance))
                {
                    engine = &chip_device->engNPORT_MULTICAST_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NPORT_MULTICAST, instance))
                {
                    engine = &chip_device->engNPORT_MULTICAST[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NVLIPT_LNK:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLIPT_LNK_BCAST, instance))
                {
                    engine = &chip_device->engNVLIPT_LNK_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLIPT_LNK, instance))
                {
                    engine = &chip_device->engNVLIPT_LNK[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NVLIPT_LNK_MULTICAST:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLIPT_LNK_MULTICAST_BCAST, instance))
                {
                    engine = &chip_device->engNVLIPT_LNK_MULTICAST_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLIPT_LNK_MULTICAST, instance))
                {
                    engine = &chip_device->engNVLIPT_LNK_MULTICAST[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NVLDL:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLDL_BCAST, instance))
                {
                    engine = &chip_device->engNVLDL_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLDL, instance))
                {
                    engine = &chip_device->engNVLDL[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NVLDL_MULTICAST:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLDL_MULTICAST_BCAST, instance))
                {
                    engine = &chip_device->engNVLDL_MULTICAST_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NVLDL_MULTICAST, instance))
                {
                    engine = &chip_device->engNVLDL_MULTICAST[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_NXBAR:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NXBAR_BCAST, instance))
                {
                    engine = &chip_device->engNXBAR_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, NXBAR, instance))
                {
                    engine = &chip_device->engNXBAR[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_TILE:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, TILE_BCAST, instance))
                {
                    engine = &chip_device->engTILE_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, TILE, instance))
                {
                    engine = &chip_device->engTILE[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_TILE_MULTICAST:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, TILE_MULTICAST_BCAST, instance))
                {
                    engine = &chip_device->engTILE_MULTICAST_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, TILE_MULTICAST, instance))
                {
                    engine = &chip_device->engTILE_MULTICAST[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_TILEOUT:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, TILEOUT_BCAST, instance))
                {
                    engine = &chip_device->engTILEOUT_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, TILEOUT, instance))
                {
                    engine = &chip_device->engTILEOUT[instance];
                }
            }
        break;

        case REGISTER_RW_ENGINE_TILEOUT_MULTICAST:
            if (bcast)
            {
                if (NVSWITCH_ENG_VALID_LS10(device, TILEOUT_MULTICAST_BCAST, instance))
                {
                    engine = &chip_device->engTILEOUT_MULTICAST_BCAST[instance];
                }
            }
            else
            {
                if (NVSWITCH_ENG_VALID_LS10(device, TILEOUT_MULTICAST, instance))
                {
                    engine = &chip_device->engTILEOUT_MULTICAST[instance];
                }
            }
        break;

        default:
            NVSWITCH_PRINT(device, ERROR,
                "%s: unknown REGISTER_RW_ENGINE 0x%x\n",
                __FUNCTION__,
                register_rw_engine);
            engine = NULL;
        break;
    }

    if (register_rw_engine == REGISTER_RW_ENGINE_RAW)
    {
        // Raw IO -- client provides full BAR0 offset
        base = 0;
    }
    else
    {
        // Check engine descriptor was found and valid
        if (engine == NULL)
        {
            retval = -NVL_BAD_ARGS;
            NVSWITCH_PRINT(device, ERROR,
                "%s: invalid REGISTER_RW_ENGINE/instance 0x%x(%d)\n",
                __FUNCTION__,
                register_rw_engine,
                instance);
        }
        else if (!engine->valid)
        {
            retval = -NVL_UNBOUND_DEVICE;
            NVSWITCH_PRINT(device, ERROR,
                "%s: REGISTER_RW_ENGINE/instance 0x%x(%d) disabled or invalid\n",
                __FUNCTION__,
                register_rw_engine,
                instance);
        }
        else
        {
            if (bcast && (engine->disc_type == DISCOVERY_TYPE_BROADCAST))
            {
                //
                // Caveat emptor: A read of a broadcast register is
                // implementation-specific.
                //
                base = engine->info.bc.bc_addr;
            }
            else if ((!bcast) && (engine->disc_type == DISCOVERY_TYPE_UNICAST))
            {
                base = engine->info.uc.uc_addr;
            }

            // A bcast/disc_type mismatch also lands here with base still 0.
            if (base == 0)
            {
                NVSWITCH_PRINT(device, ERROR,
                    "%s: REGISTER_RW_ENGINE/instance 0x%x(%d) has %s base address 0!\n",
                    __FUNCTION__,
                    register_rw_engine,
                    instance,
                    (bcast ? "BCAST" : "UNICAST" ));
                retval = -NVL_IO_ERROR;
            }
        }
    }

    *base_addr = base;
    return retval;
}

/*
 * CTRL_NVSWITCH_REGISTER_READ
 *
 * This provides direct access to the MMIO space for trusted clients like
 * MODS.
 * This API should not be exposed to unsecure clients.
 */

static NvlStatus
nvswitch_ctrl_register_read_ls10
(
    nvswitch_device *device,
    NVSWITCH_REGISTER_READ *p
)
{
    NvU32 base;
    NvU32 data;
    NvlStatus retval = NVL_SUCCESS;

    // Reads are always unicast; resolve the engine's BAR0 base first.
    retval = _nvswitch_get_engine_base_ls10(device, p->engine, p->instance, NV_FALSE, &base);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    // Make sure target offset isn't out-of-range
    // NOTE(review): base + p->offset is NvU32 arithmetic; a huge offset could
    // wrap past the bounds check. Acceptable only because this API is
    // restricted to trusted clients (see comment above) -- confirm.
    if ((base + p->offset) >= device->nvlink_device->pciInfo.bars[0].barSize)
    {
        return -NVL_IO_ERROR;
    }

    //
    // Some legacy device manuals are not 0-based (IP style).
    //
    data = NVSWITCH_OFF_RD32(device, base + p->offset);
    p->val = data;

    return NVL_SUCCESS;
}

/*
 * CTRL_NVSWITCH_REGISTER_WRITE
 *
 * This provides direct access to the MMIO space for trusted clients like
 * MODS.
 * This API should not be exposed to unsecure clients.
2710 */ 2711 2712 static NvlStatus 2713 nvswitch_ctrl_register_write_ls10 2714 ( 2715 nvswitch_device *device, 2716 NVSWITCH_REGISTER_WRITE *p 2717 ) 2718 { 2719 NvU32 base; 2720 NvlStatus retval = NVL_SUCCESS; 2721 2722 retval = _nvswitch_get_engine_base_ls10(device, p->engine, p->instance, p->bcast, &base); 2723 if (retval != NVL_SUCCESS) 2724 { 2725 return retval; 2726 } 2727 2728 // Make sure target offset isn't out-of-range 2729 if ((base + p->offset) >= device->nvlink_device->pciInfo.bars[0].barSize) 2730 { 2731 return -NVL_IO_ERROR; 2732 } 2733 2734 // 2735 // Some legacy device manuals are not 0-based (IP style). 2736 // 2737 NVSWITCH_OFF_WR32(device, base + p->offset, p->val); 2738 2739 return NVL_SUCCESS; 2740 } 2741 2742 NvlStatus 2743 nvswitch_get_nvlink_ecc_errors_ls10 2744 ( 2745 nvswitch_device *device, 2746 NVSWITCH_GET_NVLINK_ECC_ERRORS_PARAMS *params 2747 ) 2748 { 2749 NvU32 statData; 2750 NvU8 i, j; 2751 NvlStatus status; 2752 NvBool bLaneReversed; 2753 2754 nvswitch_os_memset(params->errorLink, 0, sizeof(params->errorLink)); 2755 2756 FOR_EACH_INDEX_IN_MASK(64, i, params->linkMask) 2757 { 2758 nvlink_link *link; 2759 NVSWITCH_LANE_ERROR *errorLane; 2760 NvU8 offset; 2761 NvBool minion_enabled; 2762 NvU32 sublinkWidth; 2763 2764 link = nvswitch_get_link(device, i); 2765 2766 if ((link == NULL) || 2767 !NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLDL, link->linkNumber) || 2768 (i >= NVSWITCH_LINK_COUNT(device))) 2769 { 2770 return -NVL_BAD_ARGS; 2771 } 2772 2773 sublinkWidth = device->hal.nvswitch_get_sublink_width(device, i); 2774 2775 minion_enabled = nvswitch_is_minion_initialized(device, 2776 NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION)); 2777 2778 bLaneReversed = nvswitch_link_lane_reversed_ls10(device, link->linkNumber); 2779 2780 for (j = 0; j < NVSWITCH_NUM_LANES_LS10; j++) 2781 { 2782 if (minion_enabled && (j < sublinkWidth)) 2783 { 2784 status = nvswitch_minion_get_dl_status(device, i, 2785 (NV_NVLSTAT_RX12 + j), 0, 
&statData); 2786 2787 if (status != NVL_SUCCESS) 2788 { 2789 return status; 2790 } 2791 offset = bLaneReversed ? ((sublinkWidth - 1) - j) : j; 2792 errorLane = ¶ms->errorLink[i].errorLane[offset]; 2793 errorLane->valid = NV_TRUE; 2794 } 2795 else 2796 { 2797 // MINION disabled 2798 statData = 0; 2799 offset = j; 2800 errorLane = ¶ms->errorLink[i].errorLane[offset]; 2801 errorLane->valid = NV_FALSE; 2802 } 2803 2804 errorLane->eccErrorValue = DRF_VAL(_NVLSTAT, _RX12, _ECC_CORRECTED_ERR_L0_VALUE, statData); 2805 errorLane->overflowed = DRF_VAL(_NVLSTAT, _RX12, _ECC_CORRECTED_ERR_L0_OVER, statData); 2806 } 2807 2808 if (minion_enabled) 2809 { 2810 status = nvswitch_minion_get_dl_status(device, i, 2811 NV_NVLSTAT_RX11, 0, &statData); 2812 if (status != NVL_SUCCESS) 2813 { 2814 return status; 2815 } 2816 } 2817 else 2818 { 2819 statData = 0; 2820 } 2821 2822 params->errorLink[i].eccDecFailed = DRF_VAL(_NVLSTAT, _RX11, _ECC_DEC_FAILED_VALUE, statData); 2823 params->errorLink[i].eccDecFailedOverflowed = DRF_VAL(_NVLSTAT, _RX11, _ECC_DEC_FAILED_OVER, statData); 2824 } 2825 FOR_EACH_INDEX_IN_MASK_END; 2826 2827 return NVL_SUCCESS; 2828 } 2829 2830 NvU32 2831 nvswitch_get_num_links_ls10 2832 ( 2833 nvswitch_device *device 2834 ) 2835 { 2836 return NVSWITCH_NUM_LINKS_LS10; 2837 } 2838 2839 static NvU8 2840 nvswitch_get_num_links_per_nvlipt_ls10 2841 ( 2842 nvswitch_device *device 2843 ) 2844 { 2845 return NVSWITCH_LINKS_PER_NVLIPT_LS10; 2846 } 2847 2848 2849 NvlStatus 2850 nvswitch_ctrl_get_fom_values_ls10 2851 ( 2852 nvswitch_device *device, 2853 NVSWITCH_GET_FOM_VALUES_PARAMS *p 2854 ) 2855 { 2856 NvlStatus status; 2857 NvU32 statData; 2858 nvlink_link *link; 2859 2860 link = nvswitch_get_link(device, p->linkId); 2861 if (link == NULL) 2862 { 2863 NVSWITCH_PRINT(device, ERROR, "%s: link #%d invalid\n", 2864 __FUNCTION__, p->linkId); 2865 return -NVL_BAD_ARGS; 2866 } 2867 2868 if (nvswitch_is_link_in_reset(device, link)) 2869 { 2870 NVSWITCH_PRINT(device, ERROR, "%s: link 
#%d is in reset\n", 2871 __FUNCTION__, p->linkId); 2872 return -NVL_ERR_INVALID_STATE; 2873 } 2874 2875 status = nvswitch_minion_get_dl_status(device, p->linkId, 2876 NV_NVLSTAT_TR16, 0, &statData); 2877 p->figureOfMeritValues[0] = (NvU16) (statData & 0xFFFF); 2878 p->figureOfMeritValues[1] = (NvU16) ((statData >> 16) & 0xFFFF); 2879 2880 status = nvswitch_minion_get_dl_status(device, p->linkId, 2881 NV_NVLSTAT_TR17, 0, &statData); 2882 p->figureOfMeritValues[2] = (NvU16) (statData & 0xFFFF); 2883 p->figureOfMeritValues[3] = (NvU16) ((statData >> 16) & 0xFFFF); 2884 2885 p->numLanes = nvswitch_get_sublink_width(device, p->linkId); 2886 2887 return status; 2888 } 2889 2890 void 2891 nvswitch_set_fatal_error_ls10 2892 ( 2893 nvswitch_device *device, 2894 NvBool device_fatal, 2895 NvU32 link_id 2896 ) 2897 { 2898 NvU32 reg; 2899 2900 NVSWITCH_ASSERT(link_id < nvswitch_get_num_links(device)); 2901 2902 device->link[link_id].fatal_error_occurred = NV_TRUE; 2903 2904 if (device_fatal) 2905 { 2906 reg = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW, _DRIVER_ATTACH_DETACH); 2907 reg = FLD_SET_DRF_NUM(_NVLSAW, _DRIVER_ATTACH_DETACH, _DEVICE_RESET_REQUIRED, 2908 1, reg); 2909 2910 NVSWITCH_SAW_WR32_LS10(device, _NVLSAW, _DRIVER_ATTACH_DETACH, reg); 2911 } 2912 else 2913 { 2914 reg = NVSWITCH_LINK_RD32(device, link_id, NPORT, _NPORT, _SCRATCH_WARM); 2915 reg = FLD_SET_DRF_NUM(_NPORT, _SCRATCH_WARM, _PORT_RESET_REQUIRED, 2916 1, reg); 2917 2918 NVSWITCH_LINK_WR32(device, link_id, NPORT, _NPORT, _SCRATCH_WARM, reg); 2919 } 2920 } 2921 2922 static NvU32 2923 nvswitch_get_latency_sample_interval_msec_ls10 2924 ( 2925 nvswitch_device *device 2926 ) 2927 { 2928 ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device); 2929 return chip_device->latency_stats->sample_interval_msec; 2930 } 2931 2932 static NvU32 2933 nvswitch_get_device_dma_width_ls10 2934 ( 2935 nvswitch_device *device 2936 ) 2937 { 2938 return DMA_ADDR_WIDTH_LS10; 2939 } 2940 2941 static NvU32 2942 
nvswitch_get_link_ip_version_ls10 2943 ( 2944 nvswitch_device *device, 2945 NvU32 link_id 2946 ) 2947 { 2948 ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device); 2949 NvU32 nvldl_instance; 2950 2951 nvldl_instance = NVSWITCH_GET_LINK_ENG_INST(device, link_id, NVLDL); 2952 if (NVSWITCH_ENG_IS_VALID(device, NVLDL, nvldl_instance)) 2953 { 2954 return chip_device->engNVLDL[nvldl_instance].version; 2955 } 2956 else 2957 { 2958 NVSWITCH_PRINT(device, ERROR, 2959 "%s: NVLink[0x%x] NVLDL instance invalid\n", 2960 __FUNCTION__, link_id); 2961 return 0; 2962 } 2963 } 2964 2965 static NvBool 2966 nvswitch_is_soe_supported_ls10 2967 ( 2968 nvswitch_device *device 2969 ) 2970 { 2971 if (IS_FMODEL(device)) 2972 { 2973 NVSWITCH_PRINT(device, INFO, "SOE is not yet supported on fmodel\n"); 2974 return NV_FALSE; 2975 } 2976 2977 if (device->regkeys.soe_disable == NV_SWITCH_REGKEY_SOE_DISABLE_YES) 2978 { 2979 NVSWITCH_PRINT(device, WARN, "SOE can not be disabled via regkey.\n"); 2980 } 2981 2982 if (nvswitch_is_tnvl_mode_locked(device)) 2983 { 2984 NVSWITCH_PRINT(device, INFO, 2985 "SOE is not supported when TNVL mode is locked\n"); 2986 return NV_FALSE; 2987 } 2988 2989 return NV_TRUE; 2990 } 2991 2992 NvlStatus 2993 _nvswitch_get_bios_version 2994 ( 2995 nvswitch_device *device, 2996 NvU64 *pVersion 2997 ) 2998 { 2999 NVSWITCH_GET_BIOS_INFO_PARAMS p = { 0 }; 3000 NvlStatus status; 3001 3002 if (pVersion == NULL) 3003 { 3004 return NVL_BAD_ARGS; 3005 } 3006 3007 status = device->hal.nvswitch_ctrl_get_bios_info(device, &p); 3008 if (status == NVL_SUCCESS) 3009 { 3010 *pVersion = p.version; 3011 } 3012 3013 return status; 3014 } 3015 3016 /* 3017 * @Brief : Checks if Inforom is supported 3018 * 3019 */ 3020 NvBool 3021 nvswitch_is_inforom_supported_ls10 3022 ( 3023 nvswitch_device *device 3024 ) 3025 { 3026 NvU64 version; 3027 NvlStatus status; 3028 3029 if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device)) 3030 { 3031 NVSWITCH_PRINT(device, INFO, 3032 
"INFOROM is not supported on non-silicon platform\n"); 3033 return NV_FALSE; 3034 } 3035 3036 if (nvswitch_is_tnvl_mode_enabled(device)) 3037 { 3038 NVSWITCH_PRINT(device, INFO, 3039 "INFOROM is not supported when TNVL mode is enabled\n"); 3040 return NV_FALSE; 3041 } 3042 3043 if (!nvswitch_is_soe_supported(device)) 3044 { 3045 NVSWITCH_PRINT(device, INFO, 3046 "INFOROM is not supported since SOE is not supported\n"); 3047 return NV_FALSE; 3048 } 3049 3050 status = _nvswitch_get_bios_version(device, &version); 3051 if (status != NVL_SUCCESS) 3052 { 3053 NVSWITCH_PRINT(device, ERROR, "%s: Error getting BIOS version\n", 3054 __FUNCTION__); 3055 return NV_FALSE; 3056 } 3057 3058 if (version >= NVSWITCH_IFR_MIN_BIOS_VER_LS10) 3059 { 3060 return NV_TRUE; 3061 } 3062 else 3063 { 3064 NVSWITCH_PRINT(device, WARN, 3065 "INFOROM is not supported on this NVSwitch BIOS version.\n"); 3066 return NV_FALSE; 3067 } 3068 } 3069 3070 /* 3071 * @Brief : Checks if Spi is supported 3072 * 3073 * Stubbing SOE Spi support on ls10. 
3074 * 3075 */ 3076 NvBool 3077 nvswitch_is_spi_supported_ls10 3078 ( 3079 nvswitch_device *device 3080 ) 3081 { 3082 NVSWITCH_PRINT(device, INFO, 3083 "SPI is not supported on LS10\n"); 3084 3085 return NV_FALSE; 3086 } 3087 3088 NvBool 3089 nvswitch_is_bios_supported_ls10 3090 ( 3091 nvswitch_device *device 3092 ) 3093 { 3094 if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device)) 3095 { 3096 NVSWITCH_PRINT(device, INFO, 3097 "BIOS is not supported on non-silicon platforms\n"); 3098 return NV_FALSE; 3099 } 3100 3101 if (!nvswitch_is_soe_supported(device)) 3102 { 3103 NVSWITCH_PRINT(device, INFO, 3104 "BIOS is not supported since SOE is not supported\n"); 3105 return NV_FALSE; 3106 } 3107 3108 return NV_TRUE; 3109 } 3110 3111 NvlStatus 3112 nvswitch_get_bios_size_ls10 3113 ( 3114 nvswitch_device *device, 3115 NvU32 *pSize 3116 ) 3117 { 3118 *pSize = SOE_CORE_BIOS_SIZE_LS10; 3119 3120 return NVL_SUCCESS; 3121 } 3122 3123 /* 3124 * @Brief : Check if SMBPBI is supported 3125 * 3126 */ 3127 NvBool 3128 nvswitch_is_smbpbi_supported_ls10 3129 ( 3130 nvswitch_device *device 3131 ) 3132 { 3133 NvU64 version; 3134 NvlStatus status; 3135 3136 if (!nvswitch_is_smbpbi_supported_lr10(device)) 3137 { 3138 return NV_FALSE; 3139 } 3140 3141 if (nvswitch_is_tnvl_mode_enabled(device)) 3142 { 3143 NVSWITCH_PRINT(device, INFO, 3144 "SMBPBI is not supported when TNVL mode is enabled\n"); 3145 return NV_FALSE; 3146 } 3147 3148 status = _nvswitch_get_bios_version(device, &version); 3149 if (status != NVL_SUCCESS) 3150 { 3151 NVSWITCH_PRINT(device, ERROR, "%s: Error getting BIOS version\n", 3152 __FUNCTION__); 3153 return NV_FALSE; 3154 } 3155 3156 if (version >= NVSWITCH_SMBPBI_MIN_BIOS_VER_LS10) 3157 { 3158 return NV_TRUE; 3159 } 3160 else 3161 { 3162 NVSWITCH_PRINT(device, WARN, 3163 "SMBPBI is not supported on NVSwitch BIOS version %llx.\n", version); 3164 return NV_FALSE; 3165 } 3166 } 3167 3168 /* 3169 * @Brief : Additional setup needed after blacklisted device 
initialization
 *
 * @Description :
 *
 * @param[in] device a reference to the device to initialize
 */
void
nvswitch_post_init_blacklist_device_setup_ls10
(
    nvswitch_device *device
)
{
    // Stub on LS10: nothing to do, but log so the call is visible in traces.
    NVSWITCH_PRINT(device, WARN, "%s: Function not implemented\n", __FUNCTION__);
    return;
}

/*
 * @brief: This function retrieves the NVLIPT public ID for a given global link idx
 * @params[in] device reference to current nvswitch device
 * @params[in] linkId link to retrieve NVLIPT public ID from
 * @params[out] publicId Public ID of NVLIPT owning linkId
 */
NvlStatus nvswitch_get_link_public_id_ls10
(
    nvswitch_device *device,
    NvU32 linkId,
    NvU32 *publicId
)
{
    if (!device->hal.nvswitch_is_link_valid(device, linkId) ||
        (publicId == NULL))
    {
        return -NVL_BAD_ARGS;
    }

    *publicId = NVSWITCH_NVLIPT_GET_PUBLIC_ID_LS10(linkId);

    // The derived public ID must name a valid NVLIPT engine on this device.
    return (NVSWITCH_ENG_VALID_LS10(device, NVLIPT, *publicId)) ?
                NVL_SUCCESS : -NVL_BAD_ARGS;
}

/*
 * @brief: This function retrieves the internal link idx for a given global link idx
 * @params[in] device reference to current nvswitch device
 * @params[in] linkId link to retrieve NVLIPT public ID from
 * @params[out] localLinkIdx Internal link index of linkId
 */
NvlStatus nvswitch_get_link_local_idx_ls10
(
    nvswitch_device *device,
    NvU32 linkId,
    NvU32 *localLinkIdx
)
{
    if (!device->hal.nvswitch_is_link_valid(device, linkId) ||
        (localLinkIdx == NULL))
    {
        return -NVL_BAD_ARGS;
    }

    *localLinkIdx = NVSWITCH_NVLIPT_GET_LOCAL_LINK_ID_LS10(linkId);

    return NVL_SUCCESS;
}

/*
 * @brief Report which reset scopes are pending after fatal errors.
 *
 * Reads back the sticky bits latched by nvswitch_set_fatal_error_ls10():
 * the device-level bit from SAW _DRIVER_ATTACH_DETACH and the per-port bits
 * from each valid port's NPORT _SCRATCH_WARM register. Invalid links are
 * reported as NV_FALSE.
 */
NvlStatus nvswitch_ctrl_get_fatal_error_scope_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS *pParams
)
{
    NvU32 linkId;
    NvU32 reg = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW, _DRIVER_ATTACH_DETACH);
    pParams->device = FLD_TEST_DRF_NUM(_NVLSAW, _DRIVER_ATTACH_DETACH, _DEVICE_RESET_REQUIRED,
                                       1, reg);

    for (linkId = 0; linkId < NVSWITCH_MAX_PORTS; linkId++)
    {
        if (!nvswitch_is_link_valid(device, linkId))
        {
            pParams->port[linkId] = NV_FALSE;
            continue;
        }

        reg = NVSWITCH_LINK_RD32(device, linkId, NPORT, _NPORT, _SCRATCH_WARM);
        pParams->port[linkId] = FLD_TEST_DRF_NUM(_NPORT, _SCRATCH_WARM,
                                                 _PORT_RESET_REQUIRED, 1, reg);
    }

    return NVL_SUCCESS;
}

/*
 * CTRL_NVSWITCH_SET_REMAP_POLICY
 */

/*
 * @brief Translate a REMAP table selector into the ingress RAM_SEL encoding.
 *
 * @param[in]  device         a reference to the device
 * @param[in]  table_selector NVSWITCH_TABLE_SELECT_REMAP_* value
 * @param[out] remap_ram_sel  optional; receives the RAM_SEL value when non-NULL
 *
 * @return NVL_SUCCESS, or -NVL_ERR_NOT_SUPPORTED for an unknown selector.
 */
NvlStatus
nvswitch_get_remap_table_selector_ls10
(
    nvswitch_device *device,
    NVSWITCH_TABLE_SELECT_REMAP table_selector,
    NvU32 *remap_ram_sel
)
{
    NvU32 ram_sel = 0;

    switch (table_selector)
    {
        case NVSWITCH_TABLE_SELECT_REMAP_PRIMARY:
            ram_sel = NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSNORMREMAPRAM;
            break;
        case NVSWITCH_TABLE_SELECT_REMAP_EXTA:
            ram_sel = NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSEXTAREMAPRAM;
            break;
        case NVSWITCH_TABLE_SELECT_REMAP_EXTB:
            ram_sel = NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSEXTBREMAPRAM;
            break;
        case NVSWITCH_TABLE_SELECT_REMAP_MULTICAST:
            ram_sel = NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECT_MULTICAST_REMAPRAM;
            break;
        default:
            NVSWITCH_PRINT(device, ERROR, "%s: invalid remap table selector (0x%x)\n",
                __FUNCTION__, table_selector);
            return -NVL_ERR_NOT_SUPPORTED;
            break;
    }

    // Output pointer is optional: callers may pass NULL just to validate.
    if (remap_ram_sel)
    {
        *remap_ram_sel = ram_sel;
    }

    return NVL_SUCCESS;
}

/*
 * @brief Return the entry count of the selected ingress RAM.
 *
 * @param[in] device               a reference to the device
 * @param[in] ingress_ram_selector NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECT*
 *
 * @return the RAM depth + 1 (number of entries), or 0 for an unsupported
 *         selector (an error is also logged).
 */
NvU32
nvswitch_get_ingress_ram_size_ls10
(
    nvswitch_device *device,
    NvU32 ingress_ram_selector      // NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECT*
)
{
    NvU32 ram_size = 0;

    switch (ingress_ram_selector)
    {
        case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSNORMREMAPRAM:
            ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_NORMREMAPTAB_DEPTH + 1;
            break;
        case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSEXTAREMAPRAM:
            ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_EXTAREMAPTAB_DEPTH + 1;
            break;
        case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSEXTBREMAPRAM:
            ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_EXTBREMAPTAB_DEPTH + 1;
            break;
        case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM:
            ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RID_TAB_DEPTH + 1;
            break;
        case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM:
            ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RLAN_TAB_DEPTH + 1;
            break;
        case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECT_MULTICAST_REMAPRAM:
            ram_size = NV_INGRESS_MCREMAPTABADDR_RAM_ADDRESS_MCREMAPTAB_DEPTH + 1;
            break;
        default:
            NVSWITCH_PRINT(device, ERROR, "%s: Unsupported ingress RAM selector (0x%x)\n",
                __FUNCTION__, ingress_ram_selector);
            break;
    }

    return ram_size;
}

/*
 * @brief Program numEntries REMAP policy entries starting at firstIndex.
 *
 * Inputs are assumed pre-validated by nvswitch_ctrl_set_remap_policy_ls10().
 * The RAM address register is set once with auto-increment enabled; for each
 * entry, DATA1..DATA5 are written first and DATA0 is written last because
 * the DATA0 write commits the entry and advances the RAM address.
 */
static void
_nvswitch_set_remap_policy_ls10
(
    nvswitch_device *device,
    NvU32 portNum,
    NvU32 remap_ram_sel,
    NvU32 firstIndex,
    NvU32 numEntries,
    NVSWITCH_REMAP_POLICY_ENTRY *remap_policy
)
{
    NvU32 i;
    NvU32 remap_address;
    NvU32 address_base;
    NvU32 address_limit;
    NvU32 rfunc;

    NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) |
        DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, remap_ram_sel) |
        DRF_DEF(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, _ENABLE));

    for (i = 0; i < numEntries; i++)
    {
        // Set each field if enabled, else set it to 0.
        remap_address = DRF_VAL64(_INGRESS, _REMAP, _ADDR_PHYS_LS10, remap_policy[i].address);
        address_base  = DRF_VAL64(_INGRESS, _REMAP, _ADR_BASE_PHYS_LS10, remap_policy[i].addressBase);
        address_limit = DRF_VAL64(_INGRESS, _REMAP, _ADR_LIMIT_PHYS_LS10, remap_policy[i].addressLimit);
        rfunc = remap_policy[i].flags &
            (
                NVSWITCH_REMAP_POLICY_FLAGS_REMAP_ADDR |
                NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_CHECK |
                NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_REPLACE |
                NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE |
                NVSWITCH_REMAP_POLICY_FLAGS_ADDR_TYPE
            );
        // Handle re-used RFUNC[5] conflict between Limerock and Laguna Seca
        if (rfunc & NVSWITCH_REMAP_POLICY_FLAGS_ADDR_TYPE)
        {
            //
            // RFUNC[5] Limerock functionality was deprecated and replaced with
            // a new function in Laguna Seca.  So fix RFUNC if needed.
            //

            rfunc &= ~NVSWITCH_REMAP_POLICY_FLAGS_ADDR_TYPE;
            rfunc |= NVBIT(5);
        }

        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA1,
            DRF_NUM(_INGRESS, _REMAPTABDATA1, _REQCTXT_MSK, remap_policy[i].reqCtxMask) |
            DRF_NUM(_INGRESS, _REMAPTABDATA1, _REQCTXT_CHK, remap_policy[i].reqCtxChk));
        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA2,
            DRF_NUM(_INGRESS, _REMAPTABDATA2, _REQCTXT_REP, remap_policy[i].reqCtxRep));
        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA3,
            DRF_NUM(_INGRESS, _REMAPTABDATA3, _ADR_BASE, address_base) |
            DRF_NUM(_INGRESS, _REMAPTABDATA3, _ADR_LIMIT, address_limit));
        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA4,
            DRF_NUM(_INGRESS, _REMAPTABDATA4, _TGTID, remap_policy[i].targetId) |
            DRF_NUM(_INGRESS, _REMAPTABDATA4, _RFUNC, rfunc));
        // Get the upper bits of address_base/_limit
        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA5,
            DRF_NUM(_INGRESS, _REMAPTABDATA5, _ADR_BASE,
                (address_base >> DRF_SIZE(NV_INGRESS_REMAPTABDATA3_ADR_BASE))) |
            DRF_NUM(_INGRESS, _REMAPTABDATA5, _ADR_LIMIT,
                (address_limit >> DRF_SIZE(NV_INGRESS_REMAPTABDATA3_ADR_LIMIT))));

        // Write last and auto-increment
        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA0,
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _RMAP_ADDR, remap_address) |
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _IRL_SEL, remap_policy[i].irlSelect) |
            DRF_NUM(_INGRESS, _REMAPTABDATA0, _ACLVALID, remap_policy[i].entryValid));
    }
}

/*
 * @brief Program numEntries multicast REMAP entries starting at firstIndex.
 *
 * Multicast variant of _nvswitch_set_remap_policy_ls10(): uses the dedicated
 * _MCREMAPTAB* registers, carries an MCID instead of a TGTID, and also
 * programs the reflective-memory enable bit. DATA0 is written last to
 * commit the entry and advance the auto-incrementing RAM address.
 */
static void
_nvswitch_set_mc_remap_policy_ls10
(
    nvswitch_device *device,
    NvU32 portNum,
    NvU32 firstIndex,
    NvU32 numEntries,
    NVSWITCH_REMAP_POLICY_ENTRY *remap_policy
)
{
    NvU32 i;
    NvU32 remap_address;
    NvU32 address_base;
    NvU32 address_limit;
    NvU32 rfunc;
    NvU32 reflective;

    NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _MCREMAPTABADDR,
        DRF_NUM(_INGRESS, _MCREMAPTABADDR, _RAM_ADDRESS, firstIndex) |
        DRF_DEF(_INGRESS, _MCREMAPTABADDR, _AUTO_INCR, _ENABLE));

    for (i = 0; i < numEntries; i++)
    {
        // Set each field if enabled, else set it to 0.
        remap_address = DRF_VAL64(_INGRESS, _REMAP, _ADDR_PHYS_LS10, remap_policy[i].address);
        address_base  = DRF_VAL64(_INGRESS, _REMAP, _ADR_BASE_PHYS_LS10, remap_policy[i].addressBase);
        address_limit = DRF_VAL64(_INGRESS, _REMAP, _ADR_LIMIT_PHYS_LS10, remap_policy[i].addressLimit);
        rfunc = remap_policy[i].flags &
            (
                NVSWITCH_REMAP_POLICY_FLAGS_REMAP_ADDR |
                NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_CHECK |
                NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_REPLACE |
                NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE |
                NVSWITCH_REMAP_POLICY_FLAGS_ADDR_TYPE
            );
        // Handle re-used RFUNC[5] conflict between Limerock and Laguna Seca
        if (rfunc & NVSWITCH_REMAP_POLICY_FLAGS_ADDR_TYPE)
        {
            //
            // RFUNC[5] Limerock functionality was deprecated and replaced with
            // a new function in Laguna Seca.  So fix RFUNC if needed.
            //

            rfunc &= ~NVSWITCH_REMAP_POLICY_FLAGS_ADDR_TYPE;
            rfunc |= NVBIT(5);
        }
        reflective = (remap_policy[i].flags & NVSWITCH_REMAP_POLICY_FLAGS_REFLECTIVE ? 1 : 0);

        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _MCREMAPTABDATA1,
            DRF_NUM(_INGRESS, _MCREMAPTABDATA1, _REQCTXT_MSK, remap_policy[i].reqCtxMask) |
            DRF_NUM(_INGRESS, _MCREMAPTABDATA1, _REQCTXT_CHK, remap_policy[i].reqCtxChk));
        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _MCREMAPTABDATA2,
            DRF_NUM(_INGRESS, _MCREMAPTABDATA2, _REQCTXT_REP, remap_policy[i].reqCtxRep));
        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _MCREMAPTABDATA3,
            DRF_NUM(_INGRESS, _MCREMAPTABDATA3, _ADR_BASE, address_base) |
            DRF_NUM(_INGRESS, _MCREMAPTABDATA3, _ADR_LIMIT, address_limit));
        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _MCREMAPTABDATA4,
            DRF_NUM(_INGRESS, _MCREMAPTABDATA4, _MCID, remap_policy[i].targetId) |
            DRF_NUM(_INGRESS, _MCREMAPTABDATA4, _RFUNC, rfunc) |
            DRF_NUM(_INGRESS, _MCREMAPTABDATA4, _ENB_REFLECT_MEM, reflective));
        // Get the upper bits of address_base/_limit
        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _MCREMAPTABDATA5,
            DRF_NUM(_INGRESS, _MCREMAPTABDATA5, _ADR_BASE,
                (address_base >> DRF_SIZE(NV_INGRESS_MCREMAPTABDATA3_ADR_BASE))) |
            DRF_NUM(_INGRESS, _MCREMAPTABDATA5, _ADR_LIMIT,
                (address_limit >> DRF_SIZE(NV_INGRESS_MCREMAPTABDATA3_ADR_LIMIT))));

        // Write last and auto-increment
        NVSWITCH_LINK_WR32_LS10(device, portNum, NPORT, _INGRESS, _MCREMAPTABDATA0,
            DRF_NUM(_INGRESS, _MCREMAPTABDATA0, _RMAP_ADDR, remap_address) |
            DRF_NUM(_INGRESS, _MCREMAPTABDATA0, _IRL_SEL, remap_policy[i].irlSelect) |
            DRF_NUM(_INGRESS, _MCREMAPTABDATA0, _ACLVALID, remap_policy[i].entryValid));
    }
}

/*
 * @brief Validate and program a batch of REMAP policy entries.
 *
 * Validates every entry (ID ranges, flags, address field masks, base/limit
 * ordering) before touching hardware, then dispatches to the normal or
 * multicast programming helper.
 */
NvlStatus
nvswitch_ctrl_set_remap_policy_ls10
(
    nvswitch_device *device,
    NVSWITCH_SET_REMAP_POLICY *p
)
{
    NvU32 i;
    NvU32 rfunc;
    NvU32 remap_ram_sel = ~0;
    NvU32 ram_size;
    NvlStatus retval = NVL_SUCCESS;

    //
    // This function is used to
read both normal and multicast REMAP table,
    // so guarantee table definitions are identical.
    //
    ct_assert(DRF_SIZE(NV_INGRESS_REMAPTABDATA0_RMAP_ADDR) == DRF_SIZE(NV_INGRESS_MCREMAPTABDATA0_RMAP_ADDR));
    ct_assert(DRF_SIZE(NV_INGRESS_REMAPTABDATA0_IRL_SEL) == DRF_SIZE(NV_INGRESS_MCREMAPTABDATA0_IRL_SEL));
    ct_assert(DRF_SIZE(NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK) == DRF_SIZE(NV_INGRESS_MCREMAPTABDATA1_REQCTXT_MSK));
    ct_assert(DRF_SIZE(NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK) == DRF_SIZE(NV_INGRESS_MCREMAPTABDATA1_REQCTXT_CHK));
    ct_assert(DRF_SIZE(NV_INGRESS_REMAPTABDATA2_REQCTXT_REP) == DRF_SIZE(NV_INGRESS_MCREMAPTABDATA2_REQCTXT_REP));
    ct_assert(DRF_SIZE(NV_INGRESS_REMAPTABDATA3_ADR_BASE) == DRF_SIZE(NV_INGRESS_MCREMAPTABDATA3_ADR_BASE));
    ct_assert(DRF_SIZE(NV_INGRESS_REMAPTABDATA3_ADR_LIMIT) == DRF_SIZE(NV_INGRESS_MCREMAPTABDATA3_ADR_LIMIT));
    ct_assert(DRF_SIZE(NV_INGRESS_REMAPTABDATA4_RFUNC) == DRF_SIZE(NV_INGRESS_MCREMAPTABDATA4_RFUNC));
    ct_assert(DRF_SIZE(NV_INGRESS_REMAPTABDATA5_ADR_BASE) == DRF_SIZE(NV_INGRESS_MCREMAPTABDATA5_ADR_BASE));
    ct_assert(DRF_SIZE(NV_INGRESS_REMAPTABDATA5_ADR_LIMIT) == DRF_SIZE(NV_INGRESS_MCREMAPTABDATA5_ADR_LIMIT));

    if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NPORT, p->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "NPORT port #%d not valid\n",
            p->portNum);
        return -NVL_BAD_ARGS;
    }

    retval = nvswitch_get_remap_table_selector(device, p->tableSelect, &remap_ram_sel);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Remap table #%d not supported\n",
            p->tableSelect);
        return retval;
    }

    ram_size = nvswitch_get_ingress_ram_size(device, remap_ram_sel);

    // Requested window must fit entirely inside the selected RAM.
    if ((p->firstIndex >= ram_size) ||
        (p->numEntries > NVSWITCH_REMAP_POLICY_ENTRIES_MAX) ||
        (p->firstIndex + p->numEntries > ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "remapPolicy[%d..%d] overflows range %d..%d or size %d.\n",
            p->firstIndex, p->firstIndex + p->numEntries - 1,
            0, ram_size - 1,
            NVSWITCH_REMAP_POLICY_ENTRIES_MAX);
        return -NVL_BAD_ARGS;
    }

    // Validate all entries before any hardware is programmed.
    for (i = 0; i < p->numEntries; i++)
    {
        // targetId carries an MCID for the multicast table, a TGTID otherwise.
        if (p->tableSelect == NVSWITCH_TABLE_SELECT_REMAP_MULTICAST)
        {
            if (p->remapPolicy[i].targetId &
                ~DRF_MASK(NV_INGRESS_MCREMAPTABDATA4_MCID))
            {
                NVSWITCH_PRINT(device, ERROR,
                    "remapPolicy[%d].targetId 0x%x out of valid MCID range (0x%x..0x%x)\n",
                    i, p->remapPolicy[i].targetId,
                    0, DRF_MASK(NV_INGRESS_MCREMAPTABDATA4_MCID));
                return -NVL_BAD_ARGS;
            }
        }
        else
        {
            if (p->remapPolicy[i].targetId &
                ~DRF_MASK(NV_INGRESS_REMAPTABDATA4_TGTID))
            {
                NVSWITCH_PRINT(device, ERROR,
                    "remapPolicy[%d].targetId 0x%x out of valid TGTID range (0x%x..0x%x)\n",
                    i, p->remapPolicy[i].targetId,
                    0, DRF_MASK(NV_INGRESS_REMAPTABDATA4_TGTID));
                return -NVL_BAD_ARGS;
            }
        }

        if (p->remapPolicy[i].irlSelect &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA0_IRL_SEL))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].irlSelect 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].irlSelect,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA0_IRL_SEL));
            return -NVL_BAD_ARGS;
        }

        // Reject any flag bits outside the defined set.
        rfunc = p->remapPolicy[i].flags &
            (
                NVSWITCH_REMAP_POLICY_FLAGS_REMAP_ADDR |
                NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_CHECK |
                NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_REPLACE |
                NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE |
                NVSWITCH_REMAP_POLICY_FLAGS_ADDR_TYPE |
                NVSWITCH_REMAP_POLICY_FLAGS_REFLECTIVE
            );
        if (rfunc != p->remapPolicy[i].flags)
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].flags 0x%x has undefined flags (0x%x)\n",
                i, p->remapPolicy[i].flags,
                p->remapPolicy[i].flags ^ rfunc);
            return -NVL_BAD_ARGS;
        }
        if ((rfunc & NVSWITCH_REMAP_POLICY_FLAGS_REFLECTIVE) &&
            (p->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_MULTICAST))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].flags: REFLECTIVE mapping only supported for MC REMAP\n",
                i);
            return -NVL_BAD_ARGS;
        }

        // Validate that only bits 51:39 are used
        if (p->remapPolicy[i].address &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADDR_PHYS_LS10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].address 0x%llx & ~0x%llx != 0\n",
                i, p->remapPolicy[i].address,
                DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADDR_PHYS_LS10));
            return -NVL_BAD_ARGS;
        }

        if (p->remapPolicy[i].reqCtxMask &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].reqCtxMask 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].reqCtxMask,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK));
            return -NVL_BAD_ARGS;
        }

        if (p->remapPolicy[i].reqCtxChk &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].reqCtxChk 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].reqCtxChk,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK));
            return -NVL_BAD_ARGS;
        }

        if (p->remapPolicy[i].reqCtxRep &
            ~DRF_MASK(NV_INGRESS_REMAPTABDATA2_REQCTXT_REP))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].reqCtxRep 0x%x out of valid range (0x%x..0x%x)\n",
                i, p->remapPolicy[i].reqCtxRep,
                0, DRF_MASK(NV_INGRESS_REMAPTABDATA2_REQCTXT_REP));
            return -NVL_BAD_ARGS;
        }

        // Validate that only bits 38:21 are used
        if (p->remapPolicy[i].addressBase &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_BASE_PHYS_LS10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].addressBase 0x%llx & ~0x%llx != 0\n",
                i, p->remapPolicy[i].addressBase,
                DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_BASE_PHYS_LS10));
            return -NVL_BAD_ARGS;
        }

        // Validate that only bits 38:21 are used
        if (p->remapPolicy[i].addressLimit &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LS10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].addressLimit 0x%llx & ~0x%llx != 0\n",
                i, p->remapPolicy[i].addressLimit,
                DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LS10));
            return -NVL_BAD_ARGS;
        }

        // Validate base & limit describe a region
        if (p->remapPolicy[i].addressBase > p->remapPolicy[i].addressLimit)
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].addressBase/Limit invalid: 0x%llx > 0x%llx\n",
                i, p->remapPolicy[i].addressBase, p->remapPolicy[i].addressLimit);
            return -NVL_BAD_ARGS;
        }

        // Validate limit - base doesn't overflow 64G
        if ((p->remapPolicy[i].addressLimit - p->remapPolicy[i].addressBase) &
            ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LS10))
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].addressLimit 0x%llx - addressBase 0x%llx overflows 64GB\n",
                i, p->remapPolicy[i].addressLimit, p->remapPolicy[i].addressBase);
            return -NVL_BAD_ARGS;
        }

        // AddressOffset is deprecated in LS10 and later
        if (p->remapPolicy[i].addressOffset != 0)
        {
            NVSWITCH_PRINT(device, ERROR,
                "remapPolicy[%d].addressOffset deprecated\n",
                i);
            return -NVL_BAD_ARGS;
        }
    }

    // All entries validated; program the selected table.
    if (p->tableSelect == NVSWITCH_TABLE_SELECT_REMAP_MULTICAST)
    {
        _nvswitch_set_mc_remap_policy_ls10(device, p->portNum, p->firstIndex, p->numEntries, p->remapPolicy);
    }
    else
    {
        _nvswitch_set_remap_policy_ls10(device, p->portNum, remap_ram_sel, p->firstIndex, p->numEntries, p->remapPolicy);
    }

    return retval;
}

/*
 * CTRL_NVSWITCH_GET_REMAP_POLICY
 */

// Number of 32-bit data words per REMAP table entry.
#define NVSWITCH_NUM_REMAP_POLICY_REGS_LS10 6

NvlStatus
nvswitch_ctrl_get_remap_policy_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_REMAP_POLICY_PARAMS *params
)
{
    NVSWITCH_REMAP_POLICY_ENTRY *remap_policy;
    NvU32 remap_policy_data[NVSWITCH_NUM_REMAP_POLICY_REGS_LS10]; // 6 word/REMAP table entry
    NvU32 table_index;
    NvU32 remap_count;
    NvU32 remap_address;
    NvU32 address_base;
    NvU32 address_limit;
    NvU32 remap_ram_sel;
    NvU32 ram_size;
    NvlStatus retval;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NPORT, params->portNum))
    {
        NVSWITCH_PRINT(device, ERROR,
            "NPORT port #%d not valid\n",
            params->portNum);
        return -NVL_BAD_ARGS;
    }

    retval = nvswitch_get_remap_table_selector(device, params->tableSelect, &remap_ram_sel);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Remap table #%d not supported\n",
            params->tableSelect);
        return retval;
    }

    ram_size = nvswitch_get_ingress_ram_size(device, remap_ram_sel);
    if ((params->firstIndex >= ram_size))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: remapPolicy first index %d out of range[%d..%d].\n",
            __FUNCTION__, params->firstIndex, 0, ram_size - 1);
        return -NVL_BAD_ARGS;
    }

    nvswitch_os_memset(params->entry, 0, (NVSWITCH_REMAP_POLICY_ENTRIES_MAX *
        sizeof(NVSWITCH_REMAP_POLICY_ENTRY)));

    table_index = params->firstIndex;
    remap_policy = params->entry;
    remap_count = 0;

    /* set table offset */
    // Program the starting RAM address once with auto-increment enabled; each
    // subsequent DATA register read sequence advances to the next entry.
    if (params->tableSelect == NVSWITCH_TABLE_SELECT_REMAP_MULTICAST)
    {
        NVSWITCH_LINK_WR32_LS10(device, params->portNum, NPORT, _INGRESS, _MCREMAPTABADDR,
            DRF_NUM(_INGRESS, _MCREMAPTABADDR, _RAM_ADDRESS, params->firstIndex) |
            DRF_DEF(_INGRESS, _MCREMAPTABADDR, _AUTO_INCR, _ENABLE));
    }
    else
    {
        NVSWITCH_LINK_WR32_LS10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR,
            DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) |
            DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, remap_ram_sel) |
            DRF_DEF(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, _ENABLE));
    }

    // Scan until the output array is full or the RAM is exhausted.
    while (remap_count < NVSWITCH_REMAP_POLICY_ENTRIES_MAX &&
        table_index < ram_size)
    {
        if (params->tableSelect == NVSWITCH_TABLE_SELECT_REMAP_MULTICAST)
        {
            remap_policy_data[0] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _MCREMAPTABDATA0);
            remap_policy_data[1] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _MCREMAPTABDATA1);
            remap_policy_data[2] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _MCREMAPTABDATA2);
            remap_policy_data[3] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _MCREMAPTABDATA3);
            remap_policy_data[4] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _MCREMAPTABDATA4);
            remap_policy_data[5] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _MCREMAPTABDATA5);
        }
        else
        {
            remap_policy_data[0] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA0);
            remap_policy_data[1] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA1);
            remap_policy_data[2] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA2);
            remap_policy_data[3] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA3);
            remap_policy_data[4] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA4);
            remap_policy_data[5] = NVSWITCH_LINK_RD32_LS10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA5);
        }

        /* add to remap_entries list if nonzero */
        // All-zero entries are treated as holes and skipped (not returned).
        // The normal/MC DATA field layouts are ct_assert'ed identical, so the
        // non-MC DRF accessors below are safe for both tables.
        if (remap_policy_data[0] || remap_policy_data[1] || remap_policy_data[2] ||
            remap_policy_data[3] || remap_policy_data[4] || remap_policy_data[5])
        {
            remap_policy[remap_count].irlSelect =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _IRL_SEL, remap_policy_data[0]);

            remap_policy[remap_count].entryValid =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _ACLVALID, remap_policy_data[0]);

            remap_address =
                DRF_VAL(_INGRESS, _REMAPTABDATA0, _RMAP_ADDR, remap_policy_data[0]);

            remap_policy[remap_count].address =
                DRF_NUM64(_INGRESS, _REMAP, _ADDR_PHYS_LS10, remap_address);

            remap_policy[remap_count].reqCtxMask =
                DRF_VAL(_INGRESS, _REMAPTABDATA1, _REQCTXT_MSK, remap_policy_data[1]);

            remap_policy[remap_count].reqCtxChk =
                DRF_VAL(_INGRESS, _REMAPTABDATA1, _REQCTXT_CHK, remap_policy_data[1]);

            remap_policy[remap_count].reqCtxRep =
                DRF_VAL(_INGRESS, _REMAPTABDATA2, _REQCTXT_REP, remap_policy_data[2]);

            // addressOffset is deprecated on LS10; always reported as 0.
            remap_policy[remap_count].addressOffset = 0;

            // Reassemble base/limit from the low (DATA3) and high (DATA5) parts.
            address_base =
                DRF_VAL(_INGRESS, _REMAPTABDATA3, _ADR_BASE, remap_policy_data[3]) |
                (DRF_VAL(_INGRESS, _REMAPTABDATA5, _ADR_BASE, remap_policy_data[5]) <<
                    DRF_SIZE(NV_INGRESS_REMAPTABDATA3_ADR_BASE));

            remap_policy[remap_count].addressBase =
                DRF_NUM64(_INGRESS, _REMAP, _ADR_BASE_PHYS_LS10, address_base);

            address_limit =
                DRF_VAL(_INGRESS, _REMAPTABDATA3, _ADR_LIMIT, remap_policy_data[3]) |
                (DRF_VAL(_INGRESS, _REMAPTABDATA5, _ADR_LIMIT, remap_policy_data[5]) <<
                    DRF_SIZE(NV_INGRESS_REMAPTABDATA3_ADR_LIMIT));

            remap_policy[remap_count].addressLimit =
                DRF_NUM64(_INGRESS, _REMAP, _ADR_LIMIT_PHYS_LS10, address_limit);

            // targetId holds an MCID for the multicast table, a TGTID otherwise.
            if (params->tableSelect == NVSWITCH_TABLE_SELECT_REMAP_MULTICAST)
            {
                remap_policy[remap_count].targetId =
                    DRF_VAL(_INGRESS, _MCREMAPTABDATA4, _MCID, remap_policy_data[4]);
            }
            else
            {
                remap_policy[remap_count].targetId =
                    DRF_VAL(_INGRESS, _REMAPTABDATA4, _TGTID, remap_policy_data[4]);
            }

            remap_policy[remap_count].flags =
                DRF_VAL(_INGRESS, _REMAPTABDATA4, _RFUNC, remap_policy_data[4]);
            // Handle re-used RFUNC[5] conflict between Limerock and Laguna Seca
            if
            ram_size - 1,
            NVSWITCH_REMAP_POLICY_ENTRIES_MAX);
        return -NVL_BAD_ARGS;
    }

    if (p->tableSelect == NVSWITCH_TABLE_SELECT_REMAP_MULTICAST)
    {
        for (i = 0; i < p->numEntries; i++)
        {
            // Address each entry explicitly (auto-increment disabled) so the
            // read-modify-write below targets a single, known RAM slot.
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABADDR,
                DRF_NUM(_INGRESS, _MCREMAPTABADDR, _RAM_ADDRESS, ram_address++) |
                DRF_DEF(_INGRESS, _MCREMAPTABADDR, _AUTO_INCR, _DISABLE));

            remap_policy_data[0] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA0);
            remap_policy_data[1] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA1);
            remap_policy_data[2] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA2);
            remap_policy_data[3] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA3);
            remap_policy_data[4] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA4);
            remap_policy_data[5] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA5);

            // Set valid bit in REMAPTABDATA0.
            remap_policy_data[0] = FLD_SET_DRF_NUM(_INGRESS, _MCREMAPTABDATA0, _ACLVALID, p->entryValid[i], remap_policy_data[0]);

            // Write DATA0 last: that write commits the entry.
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA5, remap_policy_data[5]);
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA4, remap_policy_data[4]);
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA3, remap_policy_data[3]);
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA2, remap_policy_data[2]);
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA1, remap_policy_data[1]);
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _MCREMAPTABDATA0, remap_policy_data[0]);
        }
    }
    else
    {
        // Select REMAP POLICY RAM and disable Auto Increment.
        remap_ram =
            DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, remap_ram_sel) |
            DRF_DEF(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, _DISABLE);

        for (i = 0; i < p->numEntries; i++)
        {
            /* set the ram address */
            remap_ram = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, remap_ram);
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, remap_ram);

            remap_policy_data[0] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA0);
            remap_policy_data[1] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA1);
            remap_policy_data[2] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA2);
            remap_policy_data[3] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA3);
            remap_policy_data[4] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA4);
            remap_policy_data[5] = NVSWITCH_LINK_RD32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA5);

            // Set valid bit in REMAPTABDATA0.
            remap_policy_data[0] = FLD_SET_DRF_NUM(_INGRESS, _REMAPTABDATA0, _ACLVALID, p->entryValid[i], remap_policy_data[0]);

            // Write DATA0 last: that write commits the entry.
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA5, remap_policy_data[5]);
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA4, remap_policy_data[4]);
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA3, remap_policy_data[3]);
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA2, remap_policy_data[2]);
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA1, remap_policy_data[1]);
            NVSWITCH_LINK_WR32_LS10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA0, remap_policy_data[0]);
        }
    }

    return NVL_SUCCESS;
}

NvlStatus nvswitch_ctrl_set_mc_rid_table_ls10
(
    nvswitch_device *device,
    NVSWITCH_SET_MC_RID_TABLE_PARAMS *p
)
{
    NvlStatus ret;
    NVSWITCH_MC_RID_ENTRY_LS10 table_entry;
    NvU32 entries_used = 0;

    if (!nvswitch_is_link_valid(device, p->portNum))
        return -NVL_BAD_ARGS;

    // check if link is invalid or repeater
    if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NPORT, p->portNum))
    {
        NVSWITCH_PRINT(device, ERROR, "%s: NPORT invalid for port %d\n",
                       __FUNCTION__, p->portNum);
        return -NVL_BAD_ARGS;
    }

    // range check index
    if (p->extendedTable && (p->index > NV_ROUTE_RIDTABADDR_INDEX_MCRIDEXTTAB_DEPTH))
    {
        NVSWITCH_PRINT(device, ERROR, "%s: index %d out of range for extended table\n",
                       __FUNCTION__, p->index);
        return -NVL_BAD_ARGS;
    }

    if (p->index > NV_ROUTE_RIDTABADDR_INDEX_MCRIDTAB_DEPTH)
    {
        NVSWITCH_PRINT(device, ERROR, "%s: index %d out of range for main table\n",
                       __FUNCTION__, p->index);
        return -NVL_BAD_ARGS;
    }

    // if !entryValid, zero the table and return
    if (!p->entryValid)
        return
nvswitch_mc_invalidate_mc_rid_entry_ls10(device, p->portNum, p->index, 4041 p->extendedTable, NV_TRUE); 4042 4043 // range check mcSize 4044 if ((p->mcSize == 0) || (p->mcSize > NVSWITCH_NUM_LINKS_LS10)) 4045 { 4046 NVSWITCH_PRINT(device, ERROR, "%s: mcSize %d is invalid\n", __FUNCTION__, p->mcSize); 4047 return -NVL_BAD_ARGS; 4048 } 4049 4050 // extended table cannot have an extended ptr 4051 if (p->extendedTable && p->extendedValid) 4052 { 4053 NVSWITCH_PRINT(device, ERROR, "%s: extendedTable cannot have an extendedValid ptr\n", 4054 __FUNCTION__); 4055 return -NVL_BAD_ARGS; 4056 } 4057 4058 // set up table entry fields 4059 table_entry.index = (NvU8)p->index; 4060 table_entry.use_extended_table = p->extendedTable; 4061 table_entry.mcpl_size = (NvU8)p->mcSize; 4062 table_entry.num_spray_groups = (NvU8)p->numSprayGroups; 4063 table_entry.ext_ptr = (NvU8)p->extendedPtr; 4064 table_entry.no_dyn_rsp = p->noDynRsp; 4065 table_entry.ext_ptr_valid = p->extendedValid; 4066 table_entry.valid = p->entryValid; 4067 4068 // build the directive list, remaining range checks are performed inside 4069 ret = nvswitch_mc_build_mcp_list_ls10(device, p->ports, p->portsPerSprayGroup, p->replicaOffset, 4070 p->replicaValid, p->vcHop, &table_entry, &entries_used); 4071 4072 NVSWITCH_PRINT(device, INFO, "nvswitch_mc_build_mcp_list_ls10() returned %d, entries used: %d\n", 4073 ret, entries_used); 4074 4075 if (ret != NVL_SUCCESS) 4076 return ret; 4077 4078 // program the table 4079 ret = nvswitch_mc_program_mc_rid_entry_ls10(device, p->portNum, &table_entry, entries_used); 4080 4081 return ret; 4082 } 4083 4084 NvlStatus nvswitch_ctrl_get_mc_rid_table_ls10 4085 ( 4086 nvswitch_device *device, 4087 NVSWITCH_GET_MC_RID_TABLE_PARAMS *p 4088 ) 4089 { 4090 NvU32 ret; 4091 NVSWITCH_MC_RID_ENTRY_LS10 table_entry; 4092 NvU32 port = p->portNum; 4093 4094 if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NPORT, port)) 4095 { 4096 NVSWITCH_PRINT(device, ERROR, "%s: NPORT invalid for port %d\n", 4097 
__FUNCTION__, port); 4098 return -NVL_BAD_ARGS; 4099 } 4100 4101 // range check index 4102 if (p->extendedTable && (p->index > NV_ROUTE_RIDTABADDR_INDEX_MCRIDEXTTAB_DEPTH)) 4103 { 4104 NVSWITCH_PRINT(device, ERROR, "%s: index %d out of range for extended table\n", 4105 __FUNCTION__, p->index); 4106 return -NVL_BAD_ARGS; 4107 } 4108 4109 if (p->index > NV_ROUTE_RIDTABADDR_INDEX_MCRIDTAB_DEPTH) 4110 { 4111 NVSWITCH_PRINT(device, ERROR, "%s: index %d out of range for main table\n", 4112 __FUNCTION__, p->index); 4113 return -NVL_BAD_ARGS; 4114 } 4115 4116 nvswitch_os_memset(&table_entry, 0, sizeof(NVSWITCH_MC_RID_ENTRY_LS10)); 4117 4118 table_entry.index = (NvU8)p->index; 4119 table_entry.use_extended_table = p->extendedTable; 4120 4121 ret = nvswitch_mc_read_mc_rid_entry_ls10(device, port, &table_entry); 4122 if (ret != NVL_SUCCESS) 4123 { 4124 NVSWITCH_PRINT(device, ERROR, "%s: nvswitch_mc_read_mc_rid_entry_ls10() returned %d\n", 4125 __FUNCTION__, ret); 4126 return ret; 4127 } 4128 4129 nvswitch_os_memset(p, 0, sizeof(NVSWITCH_GET_MC_RID_TABLE_PARAMS)); 4130 4131 p->portNum = port; 4132 p->index = table_entry.index; 4133 p->extendedTable = table_entry.use_extended_table; 4134 4135 ret = nvswitch_mc_unwind_directives_ls10(device, table_entry.directives, p->ports, 4136 p->vcHop, p->portsPerSprayGroup, p->replicaOffset, 4137 p->replicaValid); 4138 if (ret != NVL_SUCCESS) 4139 { 4140 NVSWITCH_PRINT(device, ERROR, "%s: nvswitch_mc_unwind_directives_ls10() returned %d\n", 4141 __FUNCTION__, ret); 4142 return ret; 4143 } 4144 4145 p->mcSize = table_entry.mcpl_size; 4146 p->numSprayGroups = table_entry.num_spray_groups; 4147 p->extendedPtr = table_entry.ext_ptr; 4148 p->noDynRsp = table_entry.no_dyn_rsp; 4149 p->extendedValid = table_entry.ext_ptr_valid; 4150 p->entryValid = table_entry.valid; 4151 4152 return NVL_SUCCESS; 4153 } 4154 4155 NvlStatus 4156 nvswitch_write_fabric_state_ls10 4157 ( 4158 nvswitch_device *device 4159 ) 4160 { 4161 NvU32 reg; 4162 4163 if (device 
        == NULL)
    {
        // NOTE(review): NVSWITCH_PRINT is invoked with device == NULL here;
        // confirm the macro tolerates a NULL device before this path is hit.
        NVSWITCH_PRINT(device, ERROR, "%s: Called with invalid argument\n", __FUNCTION__);
        return -NVL_BAD_ARGS;
    }

    // bump the sequence number for each write
    device->fabric_state_sequence_number++;

    reg = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW, _DRIVER_ATTACH_DETACH);

    reg = FLD_SET_DRF_NUM(_NVLSAW, _DRIVER_ATTACH_DETACH, _DEVICE_BLACKLIST_REASON,
                          device->device_blacklist_reason, reg);
    reg = FLD_SET_DRF_NUM(_NVLSAW, _DRIVER_ATTACH_DETACH, _DEVICE_FABRIC_STATE,
                          device->device_fabric_state, reg);
    reg = FLD_SET_DRF_NUM(_NVLSAW, _DRIVER_ATTACH_DETACH, _DRIVER_FABRIC_STATE,
                          device->driver_fabric_state, reg);
    reg = FLD_SET_DRF_NUM(_NVLSAW, _DRIVER_ATTACH_DETACH, _EVENT_MESSAGE_COUNT,
                          device->fabric_state_sequence_number, reg);

    NVSWITCH_SAW_WR32_LS10(device, _NVLSAW, _DRIVER_ATTACH_DETACH, reg);

    return NVL_SUCCESS;
}

/*
 * Look up the engine descriptor for eng_id in the chip's common IO table.
 * Returns NULL for an out-of-range ID; asserts (and logs) if the table slot
 * does not carry the requested ID.
 */
static NVSWITCH_ENGINE_DESCRIPTOR_TYPE *
_nvswitch_get_eng_descriptor_ls10
(
    nvswitch_device *device,
    NVSWITCH_ENGINE_ID eng_id
)
{
    ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
    NVSWITCH_ENGINE_DESCRIPTOR_TYPE *engine = NULL;

    if (eng_id >= NVSWITCH_ENGINE_ID_SIZE)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Engine_ID 0x%x out of range 0..0x%x\n",
            __FUNCTION__,
            eng_id, NVSWITCH_ENGINE_ID_SIZE-1);
        return NULL;
    }

    engine = &(chip_device->io.common[eng_id]);
    if (eng_id != engine->eng_id)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Requested Engine_ID 0x%x does not equal found Engine_ID 0x%x (%s)\n",
            __FUNCTION__,
            eng_id, engine->eng_id, engine->eng_name);
    }
    NVSWITCH_ASSERT(eng_id == engine->eng_id);

    return engine;
}

/*
 * Return the register base address of an engine instance for the requested
 * address-space type (unicast / broadcast / multicast), or
 * NVSWITCH_BASE_ADDR_INVALID if the engine or instance is not available.
 */
NvU32
nvswitch_get_eng_base_ls10
(
    nvswitch_device *device,
    NVSWITCH_ENGINE_ID eng_id,
    NvU32 eng_bcast,
    NvU32 eng_instance
)
{
    NVSWITCH_ENGINE_DESCRIPTOR_TYPE *engine;
    NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID;

    engine = _nvswitch_get_eng_descriptor_ls10(device, eng_id);
    if (engine == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: ID 0x%x[%d] %s not found\n",
            __FUNCTION__,
            eng_id, eng_instance,
            (
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
                "??"
            ));
        return NVSWITCH_BASE_ADDR_INVALID;
    }

    if ((eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) &&
        (eng_instance < engine->eng_count))
    {
        base_addr = engine->uc_addr[eng_instance];
    }
    else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST)
    {
        base_addr = engine->bc_addr;
    }
    else if ((eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) &&
             (eng_instance < engine->mc_addr_count))
    {
        base_addr = engine->mc_addr[eng_instance];
    }
    else
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Unknown address space type 0x%x (not UC, BC, or MC)\n",
            __FUNCTION__,
            eng_bcast);
    }

    // The NPORT engine can be marked as invalid when it is in Repeater Mode
    if (base_addr == NVSWITCH_BASE_ADDR_INVALID)
    {
        NVSWITCH_PRINT(device, INFO,
            "%s: ID 0x%x[%d] %s invalid address\n",
            __FUNCTION__,
            eng_id, eng_instance,
            (
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
                "??"
            ));
    }

    return base_addr;
}

/*
 * Return the number of addressable instances of the engine for the requested
 * address-space type: instance count for UC, 0/1 for BC (depending on
 * whether a broadcast address exists), MC address count for MC.
 */
NvU32
nvswitch_get_eng_count_ls10
(
    nvswitch_device *device,
    NVSWITCH_ENGINE_ID eng_id,
    NvU32 eng_bcast
)
{
    NVSWITCH_ENGINE_DESCRIPTOR_TYPE *engine;
    NvU32 eng_count = 0;

    engine = _nvswitch_get_eng_descriptor_ls10(device, eng_id);
    if (engine == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: ID 0x%x %s not found\n",
            __FUNCTION__,
            eng_id,
            (
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
                "??"
            ));
        return 0;
    }

    if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST)
    {
        eng_count = engine->eng_count;
    }
    else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST)
    {
        if (engine->bc_addr == NVSWITCH_BASE_ADDR_INVALID)
        {
            eng_count = 0;
        }
        else
        {
            eng_count = 1;
        }
    }
    else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST)
    {
        eng_count = engine->mc_addr_count;
    }
    else
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Unknown address space type 0x%x (not UC, BC, or MC)\n",
            __FUNCTION__,
            eng_bcast);
    }

    return eng_count;
}

/*
 * Read a 32-bit register at offset within the given engine instance.
 * Returns the poison value 0xBADFBADF (and asserts) if the engine address
 * cannot be resolved.
 */
NvU32
nvswitch_eng_rd_ls10
(
    nvswitch_device *device,
    NVSWITCH_ENGINE_ID eng_id,
    NvU32 eng_bcast,
    NvU32 eng_instance,
    NvU32 offset
)
{
    NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID;
    NvU32 data;

    base_addr = nvswitch_get_eng_base_ls10(device, eng_id, eng_bcast, eng_instance);
    if (base_addr == NVSWITCH_BASE_ADDR_INVALID)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: ID 0x%x[%d] %s invalid address\n",
            __FUNCTION__,
            eng_id, eng_instance,
            (
                (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ?
"UC" : 4367 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" : 4368 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" : 4369 "??" 4370 )); 4371 NVSWITCH_ASSERT(base_addr != NVSWITCH_BASE_ADDR_INVALID); 4372 return 0xBADFBADF; 4373 } 4374 4375 data = nvswitch_reg_read_32(device, base_addr + offset); 4376 4377 #if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) 4378 { 4379 NVSWITCH_ENGINE_DESCRIPTOR_TYPE *engine = _nvswitch_get_eng_descriptor_ls10(device, eng_id); 4380 4381 NVSWITCH_PRINT(device, MMIO, 4382 "%s: ENG_RD %s(0x%x)[%d] @0x%08x+0x%06x = 0x%08x\n", 4383 __FUNCTION__, 4384 engine->eng_name, engine->eng_id, 4385 eng_instance, 4386 base_addr, offset, 4387 data); 4388 } 4389 #endif //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) 4390 4391 return data; 4392 } 4393 4394 void 4395 nvswitch_eng_wr_ls10 4396 ( 4397 nvswitch_device *device, 4398 NVSWITCH_ENGINE_ID eng_id, 4399 NvU32 eng_bcast, 4400 NvU32 eng_instance, 4401 NvU32 offset, 4402 NvU32 data 4403 ) 4404 { 4405 NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID; 4406 4407 base_addr = nvswitch_get_eng_base_ls10(device, eng_id, eng_bcast, eng_instance); 4408 if (base_addr == NVSWITCH_BASE_ADDR_INVALID) 4409 { 4410 NVSWITCH_PRINT(device, ERROR, 4411 "%s: ID 0x%x[%d] %s invalid address\n", 4412 __FUNCTION__, 4413 eng_id, eng_instance, 4414 ( 4415 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" : 4416 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" : 4417 (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" : 4418 "??" 
4419 )); 4420 NVSWITCH_ASSERT(base_addr != NVSWITCH_BASE_ADDR_INVALID); 4421 return; 4422 } 4423 4424 nvswitch_reg_write_32(device, base_addr + offset, data); 4425 4426 #if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) 4427 { 4428 NVSWITCH_ENGINE_DESCRIPTOR_TYPE *engine = _nvswitch_get_eng_descriptor_ls10(device, eng_id); 4429 4430 NVSWITCH_PRINT(device, MMIO, 4431 "%s: ENG_WR %s(0x%x)[%d] @0x%08x+0x%06x = 0x%08x\n", 4432 __FUNCTION__, 4433 engine->eng_name, engine->eng_id, 4434 eng_instance, 4435 base_addr, offset, 4436 data); 4437 } 4438 #endif //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) 4439 } 4440 4441 NvU32 4442 nvswitch_get_link_eng_inst_ls10 4443 ( 4444 nvswitch_device *device, 4445 NvU32 link_id, 4446 NVSWITCH_ENGINE_ID eng_id 4447 ) 4448 { 4449 NvU32 eng_instance = NVSWITCH_ENGINE_INSTANCE_INVALID; 4450 4451 if (link_id >= NVSWITCH_LINK_COUNT(device)) 4452 { 4453 NVSWITCH_PRINT(device, ERROR, 4454 "%s: link ID 0x%x out-of-range [0x0..0x%x]\n", 4455 __FUNCTION__, 4456 link_id, NVSWITCH_LINK_COUNT(device)-1); 4457 return NVSWITCH_ENGINE_INSTANCE_INVALID; 4458 } 4459 4460 switch (eng_id) 4461 { 4462 case NVSWITCH_ENGINE_ID_NPG: 4463 eng_instance = link_id / NVSWITCH_LINKS_PER_NPG_LS10; 4464 break; 4465 case NVSWITCH_ENGINE_ID_NVLIPT: 4466 eng_instance = link_id / NVSWITCH_LINKS_PER_NVLIPT_LS10; 4467 break; 4468 case NVSWITCH_ENGINE_ID_NVLW: 4469 case NVSWITCH_ENGINE_ID_NVLW_PERFMON: 4470 eng_instance = link_id / NVSWITCH_LINKS_PER_NVLW_LS10; 4471 break; 4472 case NVSWITCH_ENGINE_ID_MINION: 4473 eng_instance = link_id / NVSWITCH_LINKS_PER_MINION_LS10; 4474 break; 4475 case NVSWITCH_ENGINE_ID_NPORT: 4476 case NVSWITCH_ENGINE_ID_NVLTLC: 4477 case NVSWITCH_ENGINE_ID_NVLDL: 4478 case NVSWITCH_ENGINE_ID_NVLIPT_LNK: 4479 case NVSWITCH_ENGINE_ID_NPORT_PERFMON: 4480 eng_instance = link_id; 4481 break; 4482 default: 4483 NVSWITCH_PRINT(device, ERROR, 4484 "%s: link ID 0x%x has no association with EngID 0x%x\n", 4485 __FUNCTION__, 4486 link_id, 
eng_id); 4487 eng_instance = NVSWITCH_ENGINE_INSTANCE_INVALID; 4488 break; 4489 } 4490 4491 return eng_instance; 4492 } 4493 4494 NvU32 4495 nvswitch_get_caps_nvlink_version_ls10 4496 ( 4497 nvswitch_device *device 4498 ) 4499 { 4500 ct_assert(NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_4_0 == 4501 NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_4_0); 4502 return NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_4_0; 4503 } 4504 4505 4506 NVSWITCH_BIOS_NVLINK_CONFIG * 4507 nvswitch_get_bios_nvlink_config_ls10 4508 ( 4509 nvswitch_device *device 4510 ) 4511 { 4512 ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device); 4513 4514 return &chip_device->bios_config; 4515 } 4516 4517 4518 static void 4519 _nvswitch_init_nport_ecc_control_ls10 4520 ( 4521 nvswitch_device *device 4522 ) 4523 { 4524 // Moving this L2 register access to SOE. Refer bug #3747687 4525 #if 0 4526 // Set ingress ECC error limits 4527 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER, 4528 DRF_NUM(_INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); 4529 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT, 1); 4530 4531 // Set egress ECC error limits 4532 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER, 4533 DRF_NUM(_EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); 4534 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT, 1); 4535 4536 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER, 4537 DRF_NUM(_EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); 4538 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT, 1); 4539 4540 // Set route ECC error limits 4541 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER, 4542 DRF_NUM(_ROUTE, _ERR_NVS_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); 4543 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _ROUTE, 
_ERR_NVS_ECC_ERROR_COUNTER_LIMIT, 1); 4544 4545 // Set tstate ECC error limits 4546 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, 4547 DRF_NUM(_TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); 4548 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1); 4549 4550 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, 4551 DRF_NUM(_TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); 4552 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT, 1); 4553 4554 // Set sourcetrack ECC error limits to _PROD value 4555 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 4556 DRF_NUM(_SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); 4557 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1); 4558 4559 // Enable ECC/parity 4560 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _INGRESS, _ERR_ECC_CTRL, 4561 DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _NCISOC_HDR_ECC_ENABLE, __PROD) | 4562 DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, __PROD) | 4563 DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _REMAPTAB_ECC_ENABLE, __PROD) | 4564 DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _RIDTAB_ECC_ENABLE, __PROD) | 4565 DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _RLANTAB_ECC_ENABLE, __PROD)); 4566 4567 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _EGRESS, _ERR_ECC_CTRL, 4568 DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NXBAR_ECC_ENABLE, __PROD) | 4569 DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NXBAR_PARITY_ENABLE, __PROD) | 4570 DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _RAM_OUT_ECC_ENABLE, __PROD) | 4571 DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NCISOC_ECC_ENABLE, __PROD) | 4572 DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, __PROD)); 4573 4574 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _ROUTE, _ERR_ECC_CTRL, 4575 DRF_DEF(_ROUTE, _ERR_ECC_CTRL, 
_GLT_ECC_ENABLE, __PROD) | 4576 DRF_DEF(_ROUTE, _ERR_ECC_CTRL, _NVS_ECC_ENABLE, __PROD)); 4577 4578 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _TSTATE, _ERR_ECC_CTRL, 4579 DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _CRUMBSTORE_ECC_ENABLE, __PROD) | 4580 DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _TAGPOOL_ECC_ENABLE, __PROD)); 4581 4582 NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _SOURCETRACK, _ERR_ECC_CTRL, 4583 DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN0_CRUMBSTORE_ECC_ENABLE, __PROD)); 4584 #endif // 0 4585 } 4586 4587 NvlStatus 4588 nvswitch_init_nport_ls10 4589 ( 4590 nvswitch_device *device 4591 ) 4592 { 4593 NvU32 data32, timeout; 4594 NvU32 idx_nport; 4595 NvU32 num_nports; 4596 4597 num_nports = NVSWITCH_ENG_COUNT(device, NPORT, ); 4598 4599 for (idx_nport = 0; idx_nport < num_nports; idx_nport++) 4600 { 4601 // Find the first valid nport 4602 if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport)) 4603 { 4604 break; 4605 } 4606 } 4607 4608 // This is a valid case, since all NPORTs can be in Repeater mode. 4609 if (idx_nport == num_nports) 4610 { 4611 NVSWITCH_PRINT(device, INFO, "%s: No valid nports found! Skipping.\n", __FUNCTION__); 4612 return NVL_SUCCESS; 4613 } 4614 4615 _nvswitch_init_nport_ecc_control_ls10(device); 4616 4617 // Moving this L2 register access to SOE. 
    // Refer bug #3747687
#if 0
    if (DRF_VAL(_SWITCH_REGKEY, _ATO_CONTROL, _DISABLE, device->regkeys.ato_control) ==
        NV_SWITCH_REGKEY_ATO_CONTROL_DISABLE_TRUE)
    {
        // ATO Disable
        data32 = NVSWITCH_NPORT_RD32_LS10(device, idx_nport, _TSTATE, _TAGSTATECONTROL);
        data32 = FLD_SET_DRF(_TSTATE, _TAGSTATECONTROL, _ATO_ENB, _OFF, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LS10(device, _TSTATE, _TAGSTATECONTROL, data32);
    }
    else
    {
        // ATO Enable
        data32 = NVSWITCH_NPORT_RD32_LS10(device, idx_nport, _TSTATE, _TAGSTATECONTROL);
        data32 = FLD_SET_DRF(_TSTATE, _TAGSTATECONTROL, _ATO_ENB, _ON, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LS10(device, _TSTATE, _TAGSTATECONTROL, data32);

        // ATO Timeout value
        timeout = DRF_VAL(_SWITCH_REGKEY, _ATO_CONTROL, _TIMEOUT, device->regkeys.ato_control);
        if (timeout != NV_SWITCH_REGKEY_ATO_CONTROL_TIMEOUT_DEFAULT)
        {
            NVSWITCH_NPORT_MC_BCAST_WR32_LS10(device, _TSTATE, _ATO_TIMER_LIMIT,
                DRF_NUM(_TSTATE, _ATO_TIMER_LIMIT, _LIMIT, timeout));
        }
    }
#endif // 0
    // STO (sourcetrack timeout) policy comes from the STO_CONTROL regkey.
    // Read from the first valid nport, then broadcast the result to all.
    if (DRF_VAL(_SWITCH_REGKEY, _STO_CONTROL, _DISABLE, device->regkeys.sto_control) ==
        NV_SWITCH_REGKEY_STO_CONTROL_DISABLE_TRUE)
    {
        // STO Disable
        data32 = NVSWITCH_NPORT_RD32_LS10(device, idx_nport, _SOURCETRACK, _CTRL);
        data32 = FLD_SET_DRF(_SOURCETRACK, _CTRL, _STO_ENB, _DISABLED, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LS10(device, _SOURCETRACK, _CTRL, data32);
    }
    else
    {
        // STO Enable
        data32 = NVSWITCH_NPORT_RD32_LS10(device, idx_nport, _SOURCETRACK, _CTRL);
        data32 = FLD_SET_DRF(_SOURCETRACK, _CTRL, _STO_ENB, _ENABLED, data32);
        NVSWITCH_NPORT_MC_BCAST_WR32_LS10(device, _SOURCETRACK, _CTRL, data32);

        // STO Timeout value
        timeout = DRF_VAL(_SWITCH_REGKEY, _STO_CONTROL, _TIMEOUT, device->regkeys.sto_control);
        if (timeout != NV_SWITCH_REGKEY_STO_CONTROL_TIMEOUT_DEFAULT)
        {
            NVSWITCH_NPORT_MC_BCAST_WR32_LS10(device, _SOURCETRACK, _MULTISEC_TIMER0,
                DRF_NUM(_SOURCETRACK, _MULTISEC_TIMER0, _TIMERVAL0, timeout));
        }
    }

    // Enable clearing of contain-and-drain state on all NPORTs
    NVSWITCH_NPORT_MC_BCAST_WR32_LS10(device, _NPORT, _CONTAIN_AND_DRAIN,
        DRF_DEF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE));

    return NVL_SUCCESS;
}

/*
 * NXBAR initialization placeholder; nothing required on LS10 yet.
 */
NvlStatus
nvswitch_init_nxbar_ls10
(
    nvswitch_device *device
)
{
    NVSWITCH_PRINT(device, WARN, "%s: Function not implemented\n", __FUNCTION__);
    return NVL_SUCCESS;
}

/*
 * Kick off hardware zero-initialization of every NPORT RAM (tagpool, remap,
 * RID, RLAN, multicast state, ...) and poll until all valid NPORTs report
 * completion or a 25 ms timeout expires.
 */
static NvlStatus
nvswitch_clear_nport_rams_ls10
(
    nvswitch_device *device
)
{
    NvU32 idx_nport;
    NvU64 nport_mask = 0;
    NvU32 zero_init_mask;
    NvU32 val;
    NVSWITCH_TIMEOUT timeout;
    NvBool keepPolling;

    // Build the mask of available NPORTs
    for (idx_nport = 0; idx_nport < NVSWITCH_ENG_COUNT(device, NPORT, ); idx_nport++)
    {
        if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport))
        {
            nport_mask |= NVBIT64(idx_nport);
        }
    }

    // Start the HW zero init
    zero_init_mask =
        DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _LINKTABLEINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _REMAPTABINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _RIDTABINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _RLANTABINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _MCREMAPTABINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _MCTAGSTATEINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _RDTAGSTATEINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _MCREDSGTINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _MCREDBUFINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _MCRIDINIT, _HWINIT) |
        DRF_DEF(_NPORT, _INITIALIZATION, _EXTMCRIDINIT, _HWINIT);

    NVSWITCH_BCAST_WR32_LS10(device, NPORT, _NPORT, _INITIALIZATION,
        zero_init_mask);

    nvswitch_timeout_create(25 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);

    do
    {
        keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;

        // Check each enabled NPORT that is still pending until all are done
        for (idx_nport = 0; idx_nport < NVSWITCH_ENG_COUNT(device, NPORT, ); idx_nport++)
        {
            if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport) && (nport_mask & NVBIT64(idx_nport)))
            {
                // Hw sets each *INIT field back when its RAM is cleared; the
                // register equals zero_init_mask when this NPORT is done
                val = NVSWITCH_ENG_RD32_LS10(device, NPORT, idx_nport, _NPORT, _INITIALIZATION);
                if (val == zero_init_mask)
                {
                    nport_mask &= ~NVBIT64(idx_nport);
                }
            }
        }

        if (nport_mask == 0)
        {
            break;
        }

        nvswitch_os_sleep(1);
    }
    while (keepPolling);

    if (nport_mask != 0)
    {
        // Remaining bits identify the NPORTs that never finished
        NVSWITCH_PRINT(device, WARN,
            "%s: Timeout waiting for NV_NPORT_INITIALIZATION (0x%llx)\n",
            __FUNCTION__, nport_mask);
        return -NVL_ERR_INVALID_STATE;
    }

    return NVL_SUCCESS;
}

/*
 * CTRL_NVSWITCH_SET_RESIDENCY_BINS
 *
 * Program the low/high residency histogram thresholds (given in nsec) for
 * the multicast or reduction TSTATE units on all NPORTs. The nsec values
 * are scaled by 1333/1000 -- presumably nsec -> core clock ticks at
 * ~1.333 GHz; TODO confirm against the clocks documentation.
 */
static NvlStatus
nvswitch_ctrl_set_residency_bins_ls10
(
    nvswitch_device *device,
    NVSWITCH_SET_RESIDENCY_BINS *p
)
{
    NvU64 threshold;
    NvU64 max_threshold;

    if (p->bin.lowThreshold > p->bin.hiThreshold )
    {
        NVSWITCH_PRINT(device, ERROR,
            "SET_RESIDENCY_BINS: Low threshold (%d) > Hi threshold (%d)\n",
            p->bin.lowThreshold, p->bin.hiThreshold);
        return -NVL_BAD_ARGS;
    }

    if (p->table_select == NVSWITCH_TABLE_SELECT_MULTICAST)
    {
        max_threshold = DRF_MASK(NV_MULTICASTTSTATE_STAT_RESIDENCY_BIN_CTRL_HIGH_LIMIT);

        threshold = (NvU64) p->bin.hiThreshold * 1333 / 1000;
        if (threshold > max_threshold)
        {
            // Report the limit back in the caller's (nsec) units
            NVSWITCH_PRINT(device, ERROR,
                "SET_RESIDENCY_BINS: Threshold overflow. %u > %llu max\n",
                p->bin.hiThreshold, max_threshold * 1000 / 1333);
            return -NVL_BAD_ARGS;
        }
        NVSWITCH_NPORT_BCAST_WR32_LS10(device, _MULTICASTTSTATE, _STAT_RESIDENCY_BIN_CTRL_HIGH,
            DRF_NUM(_MULTICASTTSTATE, _STAT_RESIDENCY_BIN_CTRL_HIGH, _LIMIT, (NvU32)threshold));

        threshold = (NvU64)p->bin.lowThreshold * 1333 / 1000;
        NVSWITCH_NPORT_BCAST_WR32_LS10(device, _MULTICASTTSTATE, _STAT_RESIDENCY_BIN_CTRL_LOW,
            DRF_NUM(_MULTICASTTSTATE, _STAT_RESIDENCY_BIN_CTRL_LOW, _LIMIT, (NvU32)threshold));
    }
    else if (p->table_select == NVSWITCH_TABLE_SELECT_REDUCTION)
    {
        max_threshold = DRF_MASK(NV_REDUCTIONTSTATE_STAT_RESIDENCY_BIN_CTRL_HIGH_LIMIT);

        threshold = (NvU64) p->bin.hiThreshold * 1333 / 1000;
        if (threshold > max_threshold)
        {
            NVSWITCH_PRINT(device, ERROR,
                "SET_RESIDENCY_BINS: Threshold overflow. %u > %llu max\n",
                p->bin.hiThreshold, max_threshold * 1000 / 1333);
            return -NVL_BAD_ARGS;
        }
        NVSWITCH_NPORT_BCAST_WR32_LS10(device, _REDUCTIONTSTATE, _STAT_RESIDENCY_BIN_CTRL_HIGH,
            DRF_NUM(_REDUCTIONTSTATE, _STAT_RESIDENCY_BIN_CTRL_HIGH, _LIMIT, (NvU32)threshold));

        threshold = (NvU64)p->bin.lowThreshold * 1333 / 1000;
        NVSWITCH_NPORT_BCAST_WR32_LS10(device, _REDUCTIONTSTATE, _STAT_RESIDENCY_BIN_CTRL_LOW,
            DRF_NUM(_REDUCTIONTSTATE, _STAT_RESIDENCY_BIN_CTRL_LOW, _LIMIT, (NvU32)threshold));
    }
    else
    {
        NVSWITCH_PRINT(device, ERROR,
            "SET_RESIDENCY_BINS: Invalid table %d\n", p->table_select);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    return NVL_SUCCESS;
}

// Number of residency bins per table = total counter slots / slots per MCID
#define NVSWITCH_RESIDENCY_BIN_SIZE \
    ((NV_MULTICASTTSTATE_STAT_RESIDENCY_COUNT_CTRL_INDEX_MAX + 1) / \
     NV_MULTICASTTSTATE_STAT_RESIDENCY_COUNT_CTRL_INDEX_MCID_STRIDE)

/*
 * CTRL_NVSWITCH_GET_RESIDENCY_BINS
 */
static NvlStatus
nvswitch_ctrl_get_residency_bins_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_RESIDENCY_BINS *p
)
{
    NvU64 val;
    NvU64 val_hi;
    NvU32 i;
    NvU64 threshold;

    // The multicast and reduction units must share the same counter layout
    // for NVSWITCH_RESIDENCY_BIN_SIZE to apply to both paths below
    ct_assert(
        NV_MULTICASTTSTATE_STAT_RESIDENCY_COUNT_CTRL_INDEX_MCID_STRIDE ==
        NV_REDUCTIONTSTATE_STAT_RESIDENCY_COUNT_CTRL_INDEX_MCID_STRIDE);
    ct_assert(
        NV_MULTICASTTSTATE_STAT_RESIDENCY_COUNT_CTRL_INDEX_MAX ==
        NV_REDUCTIONTSTATE_STAT_RESIDENCY_COUNT_CTRL_INDEX_MAX);

    ct_assert(NVSWITCH_RESIDENCY_BIN_SIZE == NVSWITCH_RESIDENCY_SIZE);

    if (!nvswitch_is_link_valid(device, p->link))
    {
        NVSWITCH_PRINT(device, ERROR,
            "GET_RESIDENCY_BINS: Invalid link %d\n", p->link);
        return -NVL_BAD_ARGS;
    }

    if (p->table_select == NVSWITCH_TABLE_SELECT_MULTICAST)
    {
        // Snap the histogram
        NVSWITCH_NPORT_WR32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL,
            DRF_DEF(_MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL, _ENABLE_TIMER, _ENABLE) |
            DRF_DEF(_MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL, _SNAP_ON_DEMAND, _ENABLE));

        // Read high/low thresholds and convert clocks to nsec
        // (1000/1333 is the inverse of the scaling applied on the set path)
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_RESIDENCY_BIN_CTRL_LOW);
        threshold = DRF_VAL(_MULTICASTTSTATE, _STAT_RESIDENCY_BIN_CTRL_LOW, _LIMIT, val);
        p->bin.lowThreshold = threshold * 1000 / 1333;

        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_RESIDENCY_BIN_CTRL_HIGH);
        threshold = DRF_VAL(_MULTICASTTSTATE, _STAT_RESIDENCY_BIN_CTRL_HIGH, _LIMIT, val);
        p->bin.hiThreshold = threshold * 1000 / 1333;

        // Auto-increment readout: each counter is two 32-bit reads
        // (low word then high word) from the same DATA register
        NVSWITCH_NPORT_WR32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_RESIDENCY_COUNT_CTRL,
            DRF_NUM(_MULTICASTTSTATE, _STAT_RESIDENCY_COUNT_CTRL, _INDEX, 0) |
            DRF_DEF(_MULTICASTTSTATE, _STAT_RESIDENCY_COUNT_CTRL, _AUTOINCR, _ON));
        for (i = 0; i < NVSWITCH_RESIDENCY_BIN_SIZE; i++)
        {
            // Low
            val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            p->residency[i].low = (val_hi << 32) | val;

            // Medium
            val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            p->residency[i].medium = (val_hi << 32) | val;

            // High
            val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            p->residency[i].high = (val_hi << 32) | val;
        }

        // Reset the histogram
        NVSWITCH_NPORT_WR32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL,
            DRF_DEF(_MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL, _ENABLE_TIMER, _ENABLE) |
            DRF_DEF(_MULTICASTTSTATE, _STAT_RESIDENCY_CONTROL, _SNAP_ON_DEMAND, _DISABLE));

    }
    else if (p->table_select == NVSWITCH_TABLE_SELECT_REDUCTION)
    {
        // Snap the histogram
        NVSWITCH_NPORT_WR32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL,
            DRF_DEF(_REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL, _ENABLE_TIMER, _ENABLE) |
            DRF_DEF(_REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL, _SNAP_ON_DEMAND, _ENABLE));

        // Read high/low thresholds and convert clocks to nsec
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_RESIDENCY_BIN_CTRL_LOW);
        threshold = DRF_VAL(_REDUCTIONTSTATE, _STAT_RESIDENCY_BIN_CTRL_LOW, _LIMIT, val);
        p->bin.lowThreshold = threshold * 1000 / 1333;

        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_RESIDENCY_BIN_CTRL_HIGH);
        threshold = DRF_VAL(_REDUCTIONTSTATE, _STAT_RESIDENCY_BIN_CTRL_HIGH, _LIMIT, val);
        p->bin.hiThreshold = threshold * 1000 / 1333;

        NVSWITCH_NPORT_WR32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_RESIDENCY_COUNT_CTRL,
            DRF_NUM(_REDUCTIONTSTATE, _STAT_RESIDENCY_COUNT_CTRL, _INDEX, 0) |
            DRF_DEF(_REDUCTIONTSTATE, _STAT_RESIDENCY_COUNT_CTRL, _AUTOINCR, _ON));
        for (i = 0; i < NVSWITCH_RESIDENCY_BIN_SIZE; i++)
        {
            // Low
            val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            p->residency[i].low = (val_hi << 32) | val;

            // Medium
            val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            p->residency[i].medium = (val_hi << 32) | val;

            // High
            val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_RESIDENCY_COUNT_DATA);
            p->residency[i].high = (val_hi << 32) | val;
        }

        // Reset the histogram
        NVSWITCH_NPORT_WR32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL,
            DRF_DEF(_REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL, _ENABLE_TIMER, _ENABLE) |
            DRF_DEF(_REDUCTIONTSTATE, _STAT_RESIDENCY_CONTROL, _SNAP_ON_DEMAND, _DISABLE));
    }
    else
    {
        NVSWITCH_PRINT(device, ERROR,
            "GET_RESIDENCY_BINS: Invalid table %d\n", p->table_select);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    return NVL_SUCCESS;
}

/*
 * CTRL_NVSWITCH_GET_RB_STALL_BUSY
 * (head only -- the remainder of this function is below this chunk)
 */
static NvlStatus
nvswitch_ctrl_get_rb_stall_busy_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_RB_STALL_BUSY *p
)
{
    NvU64 val;
    NvU64 val_hi;

    if (!nvswitch_is_link_valid(device, p->link))
    {
        NVSWITCH_PRINT(device, ERROR,
            "GET_RB_STALL_BUSY: Invalid link %d\n", p->link);
        return -NVL_BAD_ARGS;
    }

    if (p->table_select == NVSWITCH_TABLE_SELECT_MULTICAST)
    {
        // Snap the histogram
        NVSWITCH_NPORT_WR32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL,
            DRF_DEF(_MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL, _ENABLE_TIMER, _ENABLE) |
            DRF_DEF(_MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL, _SNAP_ON_DEMAND, _ENABLE));

        //
        // VC0
        //

        // Total time
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_WINDOW_0_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_WINDOW_0_HIGH);
        p->vc0.time = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        // Busy
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_BUSY_TIMER_0_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_BUSY_TIMER_0_HIGH);
        p->vc0.busy = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        // Stall
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_STALL_TIMER_0_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_STALL_TIMER_0_HIGH);
        p->vc0.stall = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        //
        // VC1
        //

        // Total time
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_WINDOW_1_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_WINDOW_1_HIGH);
        p->vc1.time = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        // Busy
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_BUSY_TIMER_1_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_BUSY_TIMER_1_HIGH);
        p->vc1.busy = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        // Stall
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_STALL_TIMER_1_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_STALL_TIMER_1_HIGH);
        p->vc1.stall = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        // Reset the busy/stall counters
        NVSWITCH_NPORT_WR32_LS10(device, p->link, _MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL,
            DRF_DEF(_MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL, _ENABLE_TIMER, _ENABLE) |
            DRF_DEF(_MULTICASTTSTATE, _STAT_STALL_BUSY_CONTROL, _SNAP_ON_DEMAND, _DISABLE));
    }
    else if (p->table_select == NVSWITCH_TABLE_SELECT_REDUCTION)
    {
        // Snap the histogram
        NVSWITCH_NPORT_WR32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL,
            DRF_DEF(_REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL, _ENABLE_TIMER, _ENABLE) |
            DRF_DEF(_REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL, _SNAP_ON_DEMAND, _ENABLE));
        //
        // VC0
        //

        // Total time
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_WINDOW_0_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_WINDOW_0_HIGH);
        p->vc0.time = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        // Busy
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_BUSY_TIMER_0_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_BUSY_TIMER_0_HIGH);
        p->vc0.busy = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        // Stall
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_STALL_TIMER_0_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_STALL_TIMER_0_HIGH);
        p->vc0.stall = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        //
        // VC1
        //

        // Total time
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_WINDOW_1_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_WINDOW_1_HIGH);
        p->vc1.time = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        // Busy
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_BUSY_TIMER_1_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_BUSY_TIMER_1_HIGH);
        p->vc1.busy = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        // Stall
        val = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_STALL_TIMER_1_LOW);
        val_hi = NVSWITCH_NPORT_RD32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_STALL_TIMER_1_HIGH);
        p->vc1.stall = ((val_hi << 32) | val) * 1000 / 1333; // in ns

        // Reset the busy/stall counters
        NVSWITCH_NPORT_WR32_LS10(device, p->link, _REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL,
            DRF_DEF(_REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL, _ENABLE_TIMER, _ENABLE) |
            DRF_DEF(_REDUCTIONTSTATE, _STAT_STALL_BUSY_CONTROL, _SNAP_ON_DEMAND, _DISABLE));
    }
    else
    {
        NVSWITCH_PRINT(device, ERROR,
            "GET_RB_STALL_BUSY: Invalid table %d\n", p->table_select);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    return NVL_SUCCESS;
}

/*
 * CTRL_NVSWITCH_GET_MULTICAST_ID_ERROR_VECTOR
 */
static NvlStatus
nvswitch_ctrl_get_multicast_id_error_vector_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_MULTICAST_ID_ERROR_VECTOR *p
)
{
    NvU32 i;

    // The 32-bit register array must exactly cover the MCID error bit vector.
    ct_assert(NV_NPORT_MCID_ERROR_VECTOR__SIZE_1 == (NVSWITCH_MC_ID_ERROR_VECTOR_COUNT / 32));

    if (!NVSWITCH_IS_LINK_ENG_VALID(device, p->link, NPORT))
    {
        NVSWITCH_PRINT(device, ERROR,
            "GET_MULTICAST_ID_ERROR_VECTOR: Invalid link %d\n", p->link);
        return -NVL_BAD_ARGS;
    }

    for (i = 0; i < NV_NPORT_MCID_ERROR_VECTOR__SIZE_1; i++)
    {
        p->error_vector[i] = NVSWITCH_LINK_RD32(device, p->link, NPORT, _NPORT,
                                                _MCID_ERROR_VECTOR(i));
    }

    return NVL_SUCCESS;
}

/*
 * CTRL_NVSWITCH_CLEAR_MULTICAST_ID_ERROR_VECTOR
 */
static NvlStatus
nvswitch_ctrl_clear_multicast_id_error_vector_ls10
(
    nvswitch_device *device,
    NVSWITCH_CLEAR_MULTICAST_ID_ERROR_VECTOR *p
)
{
    NvU32 i;

    ct_assert(NV_NPORT_MCID_ERROR_VECTOR__SIZE_1 == (NVSWITCH_MC_ID_ERROR_VECTOR_COUNT / 32));

    if (!NVSWITCH_IS_LINK_ENG_VALID(device, p->link, NPORT))
    {
        NVSWITCH_PRINT(device, ERROR,
            "CLEAR_MULTICAST_ID_ERROR_VECTOR: Invalid link %d\n", p->link);
        return -NVL_BAD_ARGS;
    }

    // Write the caller-supplied vector back to the registers.
    for (i = 0; i < NV_NPORT_MCID_ERROR_VECTOR__SIZE_1; i++)
    {
        NVSWITCH_LINK_WR32(device, p->link, NPORT, _NPORT,
                           _MCID_ERROR_VECTOR(i), p->error_vector[i]);
    }

    return NVL_SUCCESS;
}

void
nvswitch_load_uuid_ls10
(
    nvswitch_device *device
)
{
    NvU32 regData[4];

    //
    // Read 128-bit UUID from secure scratch registers which must be
    // populated by firmware.
    //
    regData[0] = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW, _SECURE_SCRATCH_WARM_GROUP_1(0));
    regData[1] = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW, _SECURE_SCRATCH_WARM_GROUP_1(1));
    regData[2] = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW, _SECURE_SCRATCH_WARM_GROUP_1(2));
    regData[3] = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW, _SECURE_SCRATCH_WARM_GROUP_1(3));

    nvswitch_os_memcpy(&device->uuid.uuid, (NvU8 *)regData, NV_UUID_LEN);
}

/*
 * Kick off ALI (autonomous link initialization) training on all enabled,
 * non-CCI-managed links. Returns NVL_ERR_GENERIC if ALI is not enabled
 * for this device.
 */
NvlStatus
nvswitch_launch_ALI_ls10
(
    nvswitch_device *device
)
{
    NvU64 enabledLinkMask;
    NvU64 forcedConfgLinkMask = 0;
    NvBool bEnableAli = NV_FALSE;
    NvU64 i = 0;
    nvlink_link *link;

    enabledLinkMask = nvswitch_get_enabled_link_mask(device);
    forcedConfgLinkMask = ((NvU64)device->regkeys.chiplib_forced_config_link_mask) +
        ((NvU64)device->regkeys.chiplib_forced_config_link_mask2 << 32);

    //
    // Currently, we don't support a mix of forced/auto config links
    // return early
    //
    if (forcedConfgLinkMask != 0)
    {
        return -NVL_ERR_NOT_SUPPORTED;
    }
#ifdef INCLUDE_NVLINK_LIB
    bEnableAli = device->nvlink_device->enableALI;
#endif

    if (!bEnableAli)
    {
        NVSWITCH_PRINT(device, INFO,
            "%s: ALI not supported on the given device\n",
            __FUNCTION__);
        return NVL_ERR_GENERIC;
    }

    FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask)
    {
        NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device));

        link = nvswitch_get_link(device, i);

        if ((link == NULL) ||
            !NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLIPT_LNK, link->linkNumber) ||
            (i >= NVSWITCH_NVLINK_MAX_LINKS))
        {
            continue;
        }

        // CCI-managed links are trained by the CCI module, not here.
        if (cciIsLinkManaged(device, i))
        {
            continue;
        }
        nvswitch_launch_ALI_link_training(device, link, NV_FALSE);
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}

/*
 * Select ALI vs. non-ALI link training per the link_training_mode regkey:
 * force-enable ALI (after verifying HW support), force-disable it, or
 * sanity-check the SYSTEM register settings and mirror them into
 * device->nvlink_device->enableALI.
 */
NvlStatus
nvswitch_set_training_mode_ls10
(
    nvswitch_device *device
)
{
    NvU64 enabledLinkMask, forcedConfgLinkMask;

    NvU32 regVal;
    NvU64 i = 0;
    nvlink_link *link;

    enabledLinkMask = nvswitch_get_enabled_link_mask(device);
    forcedConfgLinkMask = ((NvU64)device->regkeys.chiplib_forced_config_link_mask) +
        ((NvU64)device->regkeys.chiplib_forced_config_link_mask2 << 32);

    //
    // Currently, we don't support a mix of forced/auto config links
    // return early
    //
    if (forcedConfgLinkMask != 0)
    {
        NVSWITCH_PRINT(device, INFO,
            "%s: Forced-config set, skipping setting up link training selection\n",
            __FUNCTION__);
        return NVL_SUCCESS;
    }

    if (device->regkeys.link_training_mode == NV_SWITCH_REGKEY_LINK_TRAINING_SELECT_ALI)
    {
        //
        // If ALI is force enabled then check to make sure ALI is supported
        // and write to the SYSTEM_CTRL register to force it to enabled
        //
        FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask)
        {
            NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device));

            link = nvswitch_get_link(device, i);

            if ((link == NULL) ||
                !NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLIPT_LNK, link->linkNumber) ||
                (i >= NVSWITCH_NVLINK_MAX_LINKS))
            {
                continue;
            }

            regVal = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLIPT_LNK, _NVLIPT_LNK,
                                             _CTRL_CAP_LOCAL_LINK_CHANNEL);

            if (!FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_CAP_LOCAL_LINK_CHANNEL, _ALI_SUPPORT, _SUPPORTED, regVal))
            {
                NVSWITCH_PRINT(device, ERROR,
                    "%s: ALI training not supported! Regkey forcing ALI will be ignored\n",__FUNCTION__);
                return -NVL_ERR_NOT_SUPPORTED;
            }

            NVSWITCH_PRINT(device, INFO,
                "%s: ALI training set on link: 0x%llx\n",
                __FUNCTION__, i);

            regVal = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLIPT_LNK, _NVLIPT_LNK,
                                             _CTRL_SYSTEM_LINK_CHANNEL_CTRL2);

            regVal = FLD_SET_DRF(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL2, _ALI_ENABLE, _ENABLE, regVal);
            NVSWITCH_LINK_WR32_LS10(device, link->linkNumber, NVLIPT_LNK, _NVLIPT_LNK,
                                    _CTRL_SYSTEM_LINK_CHANNEL_CTRL2, regVal);

        }
        FOR_EACH_INDEX_IN_MASK_END;
    }
    else if (device->regkeys.link_training_mode == NV_SWITCH_REGKEY_LINK_TRAINING_SELECT_NON_ALI)
    {
        // If non-ALI is force enabled then disable ALI
        FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask)
        {
            NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device));

            link = nvswitch_get_link(device, i);

            if ((link == NULL) ||
                !NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLIPT_LNK, link->linkNumber) ||
                (i >= NVSWITCH_NVLINK_MAX_LINKS))
            {
                continue;
            }

            regVal = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLIPT_LNK, _NVLIPT_LNK,
                                             _CTRL_SYSTEM_LINK_CHANNEL_CTRL2);

            regVal = FLD_SET_DRF(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL2, _ALI_ENABLE, _DISABLE, regVal);
            NVSWITCH_LINK_WR32_LS10(device, link->linkNumber, NVLIPT_LNK, _NVLIPT_LNK,
                                    _CTRL_SYSTEM_LINK_CHANNEL_CTRL2, regVal);

        }
        FOR_EACH_INDEX_IN_MASK_END;

    }
    else
    {
        // Else sanity check the SYSTEM register settings
        FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask)
        {
            NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device));

            link = nvswitch_get_link(device, i);

            if ((link == NULL) ||
                !NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLIPT_LNK, link->linkNumber) ||
                (i >= NVSWITCH_NVLINK_MAX_LINKS))
            {
                continue;
            }

            regVal = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLIPT_LNK, _NVLIPT_LNK,
                                             _CTRL_SYSTEM_LINK_CHANNEL_CTRL2);

            if (FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL2, _ALI_ENABLE, _ENABLE, regVal))
            {

                regVal = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLIPT_LNK, _NVLIPT_LNK,
                                                 _CTRL_CAP_LOCAL_LINK_CHANNEL);

                if (!FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_CAP_LOCAL_LINK_CHANNEL, _ALI_SUPPORT, _SUPPORTED, regVal))
                {
                    NVSWITCH_PRINT(device, ERROR,
                        "%s: ALI training not supported! Non-ALI will be used as the default.\n",__FUNCTION__);
#ifdef INCLUDE_NVLINK_LIB
                    device->nvlink_device->enableALI = NV_FALSE;
#endif
                    return NVL_SUCCESS;
                }
#ifdef INCLUDE_NVLINK_LIB
                device->nvlink_device->enableALI = NV_TRUE;
#endif
            }
            else
            {
                NVSWITCH_PRINT(device, ERROR,
                    "%s: ALI training not enabled! Non-ALI will be used as the default.\n",__FUNCTION__);
#ifdef INCLUDE_NVLINK_LIB
                device->nvlink_device->enableALI = NV_FALSE;
#endif
                return NVL_SUCCESS;
            }
        }
        FOR_EACH_INDEX_IN_MASK_END;
    }

    return NVL_SUCCESS;
}

/*
 * Annotate each enabled link's status entry with its L0/L1 power state.
 * Only links currently in ACTIVE link state have a meaningful power state;
 * all others are reported as INVALID.
 */
static void
_nvswitch_get_nvlink_power_state_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_NVLINK_STATUS_PARAMS *ret
)
{
    nvlink_link *link;
    NvU32 linkState;
    NvU32 linkPowerState;
    NvU8 i;

    // Determine power state for each enabled link
    FOR_EACH_INDEX_IN_MASK(64, i, ret->enabledLinkMask)
    {
        NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device));

        link = nvswitch_get_link(device, i);

        if ((link == NULL) ||
            (i >= NVSWITCH_NVLINK_MAX_LINKS))
        {
            continue;
        }

        linkState = ret->linkInfo[i].linkState;

        switch (linkState)
        {
            case NVSWITCH_NVLINK_STATUS_LINK_STATE_ACTIVE:
                linkPowerState = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber, NVLIPT_LNK, _NVLIPT_LNK, _PWRM_CTRL);

                if (FLD_TEST_DRF(_NVLIPT_LNK, _PWRM_CTRL, _L1_CURRENT_STATE, _L1, linkPowerState))
                {
                    linkPowerState = NVSWITCH_NVLINK_STATUS_LINK_POWER_STATE_L1;
                }
                else
                {
                    linkPowerState = NVSWITCH_NVLINK_STATUS_LINK_POWER_STATE_L0;
                }
                break;

            default:
                linkPowerState = NVSWITCH_NVLINK_STATUS_LINK_POWER_STATE_INVALID;
                break;
        }

        ret->linkInfo[i].linkPowerState = linkPowerState;
    }
    FOR_EACH_INDEX_IN_MASK_END;
}

NvlStatus
nvswitch_ctrl_get_nvlink_status_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_NVLINK_STATUS_PARAMS *ret
)
{
    NvlStatus retval = NVL_SUCCESS;

    // Common status collection is shared with LR10 ...
    retval = nvswitch_ctrl_get_nvlink_status_lr10(device, ret);

    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    // ... then overlay the LS10-specific per-link power state.
    _nvswitch_get_nvlink_power_state_ls10(device, ret);

    return retval;
} 5455 5456 NvlStatus 5457 nvswitch_parse_bios_image_ls10 5458 ( 5459 nvswitch_device *device 5460 ) 5461 { 5462 return nvswitch_parse_bios_image_lr10(device); 5463 } 5464 5465 static NvlStatus 5466 nvswitch_split_and_send_inband_data_ls10 5467 ( 5468 nvswitch_device *device, 5469 NvU32 linkId, 5470 nvswitch_inband_send_data *inBandData 5471 ) 5472 { 5473 NvlStatus status = NVL_SUCCESS; 5474 NvU32 i; 5475 NvU32 bytes; 5476 NvU32 maxSplitSize = NVLINK_INBAND_MAX_XFER_SIZE; 5477 NvU32 totalBytesToSend = inBandData->bufferSize; 5478 NvU32 numChunks = NV_ALIGN_UP(inBandData->bufferSize, maxSplitSize) / 5479 maxSplitSize; 5480 5481 inBandData->hdr.data = NVLINK_INBAND_DRV_HDR_TYPE_START; 5482 bytes = NV_MIN(totalBytesToSend, maxSplitSize); 5483 5484 NVSWITCH_ASSERT(numChunks != 0); 5485 5486 for (i = 0; i < numChunks; i++) 5487 { 5488 inBandData->bufferSize = bytes; 5489 // Last chunk 5490 if (i == (numChunks - 1)) 5491 { 5492 // 5493 // A chunk can have both _START and _END set at the same time, if it 5494 // is the only chunk being sent. 
5495 // 5496 inBandData->hdr.data |= NVLINK_INBAND_DRV_HDR_TYPE_END; 5497 inBandData->hdr.data &= ~NVLINK_INBAND_DRV_HDR_TYPE_MID; // clear 5498 } 5499 5500 status = nvswitch_minion_send_inband_data_ls10(device, linkId, inBandData); 5501 if (status != NVL_SUCCESS) 5502 return status; 5503 5504 inBandData->sendBuffer += bytes; 5505 totalBytesToSend -= bytes; 5506 5507 bytes = NV_MIN(totalBytesToSend, maxSplitSize); 5508 inBandData->hdr.data = NVLINK_INBAND_DRV_HDR_TYPE_MID; 5509 } 5510 5511 return NVL_SUCCESS; 5512 } 5513 5514 void 5515 nvswitch_send_inband_nack_ls10 5516 ( 5517 nvswitch_device *device, 5518 NvU32 *hdr, 5519 NvU32 linkId 5520 ) 5521 { 5522 NvlStatus status; 5523 nvswitch_inband_send_data inBandData; 5524 nvlink_inband_msg_header_t *msghdr = (nvlink_inband_msg_header_t *)hdr; 5525 5526 msghdr->status = NV_ERR_FABRIC_MANAGER_NOT_PRESENT; 5527 switch (msghdr->type) 5528 { 5529 case NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_REQ: 5530 msghdr->type = NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_RSP; 5531 break; 5532 default: 5533 NVSWITCH_PRINT(device, ERROR, "%s:Wrong HDR type for NACK\n", 5534 __FUNCTION__); 5535 return; 5536 } 5537 msghdr->length = 0; 5538 5539 inBandData.sendBuffer = (NvU8 *)msghdr; 5540 inBandData.bufferSize = sizeof(nvlink_inband_msg_header_t); 5541 5542 status = nvswitch_split_and_send_inband_data_ls10(device, linkId, &inBandData); 5543 5544 if (status != NVL_SUCCESS) 5545 { 5546 NVSWITCH_PRINT(device, ERROR, "%s:Sending NACK failed\n", 5547 __FUNCTION__); 5548 } 5549 } 5550 5551 NvU32 5552 nvswitch_get_max_persistent_message_count_ls10 5553 ( 5554 nvswitch_device *device 5555 ) 5556 { 5557 return NUM_MAX_MCFLA_SLOTS_LS10; 5558 } 5559 5560 /* 5561 * CTRL_NVSWITCH_INBAND_SEND_DATA 5562 */ 5563 NvlStatus 5564 nvswitch_ctrl_inband_send_data_ls10 5565 ( 5566 nvswitch_device *device, 5567 NVSWITCH_INBAND_SEND_DATA_PARAMS *p 5568 ) 5569 { 5570 NvlStatus status; 5571 nvswitch_inband_send_data inBandData; 5572 5573 ct_assert(NVLINK_INBAND_MAX_MSG_SIZE 
== NVSWITCH_INBAND_DATA_SIZE); 5574 5575 if (p->dataSize == 0 || p->dataSize > NVSWITCH_INBAND_DATA_SIZE) 5576 { 5577 NVSWITCH_PRINT(device, ERROR, "Bad Inband data, got buffer of 0. Skipping Inband Send\n"); 5578 return -NVL_BAD_ARGS; 5579 } 5580 5581 if (!device->hal.nvswitch_is_link_valid(device, p->linkId)) 5582 { 5583 NVSWITCH_PRINT(device, ERROR, "Bad linkId %d is wrong\n", p->linkId); 5584 return -NVL_BAD_ARGS; 5585 } 5586 5587 inBandData.sendBuffer = p->buffer; 5588 inBandData.bufferSize = p->dataSize; 5589 5590 status = nvswitch_split_and_send_inband_data_ls10(device, p->linkId, &inBandData); 5591 5592 if (status != NVL_SUCCESS) 5593 { 5594 return status; 5595 } 5596 5597 p->dataSent = p->dataSize; 5598 5599 return NVL_SUCCESS; 5600 } 5601 5602 /* 5603 * CTRL_NVSWITCH_INBAND_READ_DATA 5604 */ 5605 NvlStatus 5606 nvswitch_ctrl_inband_read_data_ls10 5607 ( 5608 nvswitch_device *device, 5609 NVSWITCH_INBAND_READ_DATA_PARAMS *p 5610 ) 5611 { 5612 if (!device->hal.nvswitch_is_link_valid(device, p->linkId)) 5613 { 5614 NVSWITCH_PRINT(device, ERROR, "Bad linkId %d is wrong\n", p->linkId); 5615 return -NVL_BAD_ARGS; 5616 } 5617 5618 return nvswitch_inband_read_data(device, p->buffer, p->linkId, &p->dataSize); 5619 } 5620 5621 /* 5622 * CTRL_NVSWITCH_GET_BOARD_PART_NUMBER 5623 */ 5624 NvlStatus 5625 nvswitch_ctrl_get_board_part_number_ls10 5626 ( 5627 nvswitch_device *device, 5628 NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p 5629 ) 5630 { 5631 struct inforom *pInforom = device->pInforom; 5632 INFOROM_OBD_OBJECT_V2_XX *pOBDObj; 5633 int byteIdx; 5634 5635 if (pInforom == NULL) 5636 { 5637 return -NVL_ERR_NOT_SUPPORTED; 5638 } 5639 5640 if (!pInforom->OBD.bValid) 5641 { 5642 NVSWITCH_PRINT(device, ERROR, "OBD data is not available\n"); 5643 return -NVL_ERR_NOT_SUPPORTED; 5644 } 5645 5646 pOBDObj = &pInforom->OBD.object.v2; 5647 5648 if (sizeof(p->data) != sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008)) 5649 { 5650 NVSWITCH_PRINT(device, ERROR, 5651 "board part 
            "number available size %lu is not same as the request size %lu\n",
            sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008), sizeof(p->data));
        // NOTE(review): %lu with sizeof (size_t) is not portable on LLP64
        // targets; %zu or an explicit cast would be safer -- confirm the
        // NVSWITCH_PRINT backend before changing.
        return -NVL_ERR_GENERIC;
    }

    nvswitch_os_memset(p, 0, sizeof(NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR));

    /* Copy board type data */
    for (byteIdx = 0; byteIdx < NVSWITCH_BOARD_PART_NUMBER_SIZE_IN_BYTES; byteIdx++)
    {
        p->data[byteIdx] =(NvU8)(pOBDObj->productPartNumber[byteIdx] & 0xFF);
    }

    return NVL_SUCCESS;
}

/*
 * Gather the low-power (LP) counters requested in counterValidMask from
 * MINION DLSTAT registers; counterValidMask is rewritten on return to
 * indicate which counters were actually populated.
 */
NvlStatus
nvswitch_ctrl_get_nvlink_lp_counters_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS *params
)
{
    NvU32 counterValidMaskOut;
    NvU32 counterValidMask;
    NvU32 cntIdx;
    NV_STATUS status;   // NOTE(review): compared against NvlStatus codes below -- presumably compatible; confirm
    NvU32 statData;

    if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLDL, params->linkId))
    {
        return -NVL_BAD_ARGS;
    }

    counterValidMaskOut = 0;
    counterValidMask = params->counterValidMask;

    cntIdx = CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_COUNT_TX_NVHS;
    if (counterValidMask & NVBIT32(cntIdx))
    {
        status = nvswitch_minion_get_dl_status(device, params->linkId,
                                               NV_NVLSTAT_TX01, 0, &statData);
        if (status != NVL_SUCCESS)
        {
            return status;
        }
        params->counterValues[cntIdx] = DRF_VAL(_NVLSTAT_TX01, _COUNT_TX_STATE,
                                                _NVHS_VALUE, statData);
        counterValidMaskOut |= NVBIT32(cntIdx);
    }

    cntIdx = CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_COUNT_TX_OTHER;
    if (counterValidMask & NVBIT32(cntIdx))
    {
        status = nvswitch_minion_get_dl_status(device, params->linkId,
                                               NV_NVLSTAT_TX02, 0, &statData);
        if (status != NVL_SUCCESS)
        {
            return status;
        }
        params->counterValues[cntIdx] = DRF_VAL(_NVLSTAT_TX02, _COUNT_TX_STATE,
                                                _OTHER_VALUE, statData);
        counterValidMaskOut |= NVBIT32(cntIdx);
    }

    cntIdx = CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_NUM_TX_LP_ENTER;
    if (counterValidMask & NVBIT32(cntIdx))
    {
        status = nvswitch_minion_get_dl_status(device, params->linkId,
                                               NV_NVLSTAT_TX06, 0, &statData);
        if (status != NVL_SUCCESS)
        {
            return status;
        }
        params->counterValues[cntIdx] = DRF_VAL(_NVLSTAT_TX06, _NUM_LCL,
                                                _LP_ENTER_VALUE, statData);
        counterValidMaskOut |= NVBIT32(cntIdx);
    }

    cntIdx = CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_NUM_TX_LP_EXIT;
    if (counterValidMask & NVBIT32(cntIdx))
    {
        status = nvswitch_minion_get_dl_status(device, params->linkId,
                                               NV_NVLSTAT_TX05, 0, &statData);
        if (status != NVL_SUCCESS)
        {
            return status;
        }
        params->counterValues[cntIdx] = DRF_VAL(_NVLSTAT_TX05, _NUM_LCL,
                                                _LP_EXIT_VALUE, statData);
        counterValidMaskOut |= NVBIT32(cntIdx);
    }

    cntIdx = CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_COUNT_TX_SLEEP;
    if (counterValidMask & NVBIT32(cntIdx))
    {
        status = nvswitch_minion_get_dl_status(device, params->linkId,
                                               NV_NVLSTAT_TX10, 0, &statData);
        if (status != NVL_SUCCESS)
        {
            return status;
        }
        params->counterValues[cntIdx] = DRF_VAL(_NVLSTAT_TX10, _COUNT_TX_STATE,
                                                _SLEEP_VALUE, statData);
        counterValidMaskOut |= NVBIT32(cntIdx);
    }

    params->counterValidMask = counterValidMaskOut;

    return NVL_SUCCESS;
}

static NvlStatus
nvswitch_ctrl_clear_counters_ls10
(
    nvswitch_device *device,
    NVSWITCH_NVLINK_CLEAR_COUNTERS_PARAMS *ret
)
{
    nvlink_link *link;
    NvU8 i;
    NvU32 counterMask;
    NvlStatus status = NVL_SUCCESS;

    counterMask = ret->counterMask;

    FOR_EACH_INDEX_IN_MASK(64, i, ret->linkMask)
    {
        link = nvswitch_get_link(device, i);
        if (link == NULL)
        {
            continue;
        }

        // Re-load the full mask per link (it is stripped below).
        counterMask = ret->counterMask;

        // PHY refresh counters are held by MINION: clear them with a DLCMD,
        // then remove those bits before clearing the remaining groups.
        if ((counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS) ||
            (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL))
        {
            status = nvswitch_minion_send_command(device, link->linkNumber,
                NV_MINION_NVLINK_DL_CMD_COMMAND_DLSTAT_CLR_MINION_MISCCNT, 0);
            if (status != NVL_SUCCESS)
            {
                NVSWITCH_PRINT(device, ERROR, "%s : Failed to clear misc count to MINION for link # %d\n",
                    __FUNCTION__, link->linkNumber);
            }
            counterMask &=
                ~(NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS | NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL);
        }

        nvswitch_ctrl_clear_throughput_counters_ls10(device, link, counterMask);
        nvswitch_ctrl_clear_lp_counters_ls10(device, link, counterMask);
        status = nvswitch_ctrl_clear_dl_error_counters_ls10(device, link, counterMask);

        // Return early with failure on clearing through minion
        if (status != NVL_SUCCESS)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Failure on clearing link counter mask 0x%x on link %d\n",
                __FUNCTION__, counterMask, link->linkNumber);
            break;
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return status;
}

/*
 * Apply (or reset to default) the per-link error rate threshold settings.
 * The interrupt is always disabled while the threshold is being changed and
 * re-enabled/configured afterwards.
 */
NvlStatus
nvswitch_ctrl_set_nvlink_error_threshold_ls10
(
    nvswitch_device *device,
    NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams
)
{
    nvlink_link *link;
    NvU8 i;

    FOR_EACH_INDEX_IN_MASK(64, i, pParams->link_mask)
    {
        link = nvswitch_get_link(device, i);
        if (link == NULL)
        {
            continue;
        }

        if (pParams->errorThreshold[link->linkNumber].flags & NVSWITCH_NVLINK_ERROR_THRESHOLD_RESET)
        {
            link->errorThreshold.bUserConfig = NV_FALSE;

            // Disable the interrupt
            nvswitch_configure_error_rate_threshold_interrupt_ls10(link, NV_FALSE);

            // Set to default value
            nvswitch_set_error_rate_threshold_ls10(link, NV_TRUE);

            // Enable the interrupt
            nvswitch_configure_error_rate_threshold_interrupt_ls10(link, NV_TRUE);
        }
        else
        {
            link->errorThreshold.thresholdMan =
                pParams->errorThreshold[link->linkNumber].thresholdMan;
            link->errorThreshold.thresholdExp =
                pParams->errorThreshold[link->linkNumber].thresholdExp;
            link->errorThreshold.timescaleMan =
                pParams->errorThreshold[link->linkNumber].timescaleMan;
            link->errorThreshold.timescaleExp =
                pParams->errorThreshold[link->linkNumber].timescaleExp;
            link->errorThreshold.bInterruptEn =
                pParams->errorThreshold[link->linkNumber].bInterruptEn;
            link->errorThreshold.bUserConfig = NV_TRUE;

            // Disable the interrupt
            nvswitch_configure_error_rate_threshold_interrupt_ls10(link, NV_FALSE);

            // Set the Error threshold
            nvswitch_set_error_rate_threshold_ls10(link, NV_FALSE);

            // Configure the interrupt
            nvswitch_configure_error_rate_threshold_interrupt_ls10(link,
                pParams->errorThreshold[link->linkNumber].bInterruptEn);
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}

NvlStatus
nvswitch_ctrl_get_nvlink_error_threshold_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams
)
{
    nvlink_link *link;
    NvU8 i;

    FOR_EACH_INDEX_IN_MASK(64, i, pParams->link_mask)
    {
        link = nvswitch_get_link(device, i);
        if (link == NULL)
        {
            continue;
        }

        // Get the Error threshold
        nvswitch_get_error_rate_threshold_ls10(link);

        pParams->errorThreshold[link->linkNumber].thresholdMan =
            link->errorThreshold.thresholdMan;
        pParams->errorThreshold[link->linkNumber].thresholdExp =
            link->errorThreshold.thresholdExp;
        pParams->errorThreshold[link->linkNumber].timescaleMan =
            link->errorThreshold.timescaleMan;
        pParams->errorThreshold[link->linkNumber].timescaleExp =
            link->errorThreshold.timescaleExp;
        pParams->errorThreshold[link->linkNumber].bInterruptEn =
            link->errorThreshold.bInterruptEn;
        pParams->errorThreshold[link->linkNumber].bInterruptTrigerred =
            link->errorThreshold.bInterruptTrigerred;
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}

NvlStatus
nvswitch_get_board_id_ls10
(
    nvswitch_device *device,
    NvU16 *pBoardId
)
{
    NvlStatus ret;
    NvU32 biosOemVersionBytes;

    if (pBoardId == NULL)
    {
        return -NVL_BAD_ARGS;
    }

    // Check if bios valid
    ret = nvswitch_lib_get_bios_version(device, NULL);
    if (ret != NVL_SUCCESS)
    {
        return ret;
    }

    // The board ID is encoded in the OEM BIOS version scratch register.
    biosOemVersionBytes = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW_SW,
                                                 _OEM_BIOS_VERSION);
    *pBoardId = DRF_VAL(_NVLSAW_SW, _OEM_BIOS_VERSION, _BOARD_ID, biosOemVersionBytes);

    return NVL_SUCCESS;
}

/*
 * Early BAR0 sanity check: detect firmware recovery mode, PCIe/pri IO
 * failure, and (on silicon) FSP boot failure, before MMIO discovery runs.
 * Dumps triage scratch registers and raises an SXID on any failure.
 */
NvlStatus
nvswitch_check_io_sanity_ls10
(
    nvswitch_device *device
)
{
    NvBool keepPolling;
    NVSWITCH_TIMEOUT timeout;
    NvU32 val;
    NvBool error = NV_FALSE;
    NvU32 sxid;
    const char *sxid_desc = NULL;

    //
    // NOTE: MMIO discovery has not been performed so only constant BAR0 offset
    // addressing can be performed.
    //

    // BAR0 offset 0 should always contain valid data -- unless it doesn't
    val = NVSWITCH_OFF_RD32(device, 0);
    if (val == 0)
    {
        error = NV_TRUE;
        sxid = NVSWITCH_ERR_HW_HOST_FIRMWARE_RECOVERY_MODE;
        sxid_desc = "Firmware recovery mode";
    }
    else if ((val == 0xFFFFFFFF) || ((val & 0xFFFF0000) == 0xBADF0000))
    {
        // All-Fs or 0xBADFxxxx reads indicate a failed register access.
        error = NV_TRUE;
        sxid = NVSWITCH_ERR_HW_HOST_IO_FAILURE;
        sxid_desc = "IO failure";
    }
    else if (!IS_FMODEL(device))
    {
        // check if FSP successfully started (poll for up to 10 seconds)
        nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
        do
        {
            keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;

            val = NVSWITCH_REG_RD32(device, _GFW_GLOBAL, _BOOT_PARTITION_PROGRESS);
            if (FLD_TEST_DRF(_GFW_GLOBAL, _BOOT_PARTITION_PROGRESS, _VALUE, _SUCCESS, val))
            {
                break;
            }

            nvswitch_os_sleep(1);
        }
        while (keepPolling);
        if (!FLD_TEST_DRF(_GFW_GLOBAL, _BOOT_PARTITION_PROGRESS, _VALUE, _SUCCESS, val))
        {
            error = NV_TRUE;
            sxid = NVSWITCH_ERR_HW_HOST_FIRMWARE_INITIALIZATION_FAILURE;
            sxid_desc = "Firmware initialization failure";
        }
    }

    if (error)
    {
        NVSWITCH_RAW_ERROR_LOG_TYPE report = { 0, { 0 } };
        NVSWITCH_RAW_ERROR_LOG_TYPE report_saw = {0, { 0 }};
        NvU32 report_idx = 0;
        NvU32 i;

        val = NVSWITCH_REG_RD32(device, _GFW_GLOBAL, _BOOT_PARTITION_PROGRESS);
        report.data[report_idx++] = val;
        NVSWITCH_PRINT(device, ERROR, "%s: -- _GFW_GLOBAL, _BOOT_PARTITION_PROGRESS (0x%x) != _SUCCESS --\n",
                       __FUNCTION__, val);

        for (i = 0; i <= 15; i++)
        {
            val = NVSWITCH_OFF_RD32(device,
                NV_PTOP_UNICAST_SW_DEVICE_BASE_SAW_0 + NV_NVLSAW_SW_SCRATCH(i));
            report_saw.data[i] = val;
            NVSWITCH_PRINT(device, ERROR, "%s: -- NV_NVLSAW_SW_SCRATCH(%d) = 0x%08x\n",
                           __FUNCTION__, i, val);
        }

        for (i = 0; i < NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2__SIZE_1; i++)
        {
            val = NVSWITCH_REG_RD32(device, _PFSP, _FALCON_COMMON_SCRATCH_GROUP_2(i));
            report.data[report_idx++] = val;
            NVSWITCH_PRINT(device, ERROR, "%s: -- NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(%d) = 0x%08x\n",
                           __FUNCTION__, i, val);
        }

        // Include useful scratch information for triage
        NVSWITCH_PRINT_SXID_NO_BBX(device, sxid,
            "Fatal, %s (0x%x/0x%x, 0x%x, 0x%x, 0x%x/0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", sxid_desc,
            report.data[0], report.data[1], report.data[2], report.data[3], report.data[4],
            report_saw.data[0], report_saw.data[1], report_saw.data[12], report_saw.data[14], report_saw.data[15]);

        return -NVL_INITIALIZATION_TOTAL_FAILURE;
    }

    return NVL_SUCCESS;
}

/*
 * @brief: This function returns the current value of the SOE heartbeat gpio
 * @params[in] device reference to current nvswitch device
 * @params[in] p NVSWITCH_GET_SOE_HEARTBEAT_PARAMS
 */
NvlStatus
nvswitch_ctrl_get_soe_heartbeat_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_SOE_HEARTBEAT_PARAMS *p
)
{
    NvU32 gpioVal = 0;
    NvU64 hi = 0;
    NvU64 lo = 0;
    NvU64 test = 0;

    if (!nvswitch_is_cci_supported(device))
    {
        return -NVL_ERR_NOT_SUPPORTED;
    }

    // Read status of heartbeat gpio
    gpioVal = NVSWITCH_REG_RD32(device, _GPIO, _OUTPUT_CNTL(3));

    // Record timestamp of gpio read from PTIMER.
    // hi/lo/hi read sequence guards against a 32-bit rollover between reads.
    do
    {
        hi = NVSWITCH_ENG_RD32(device, PTIMER, , 0, _PTIMER, _TIME_1);
        lo = NVSWITCH_ENG_RD32(device, PTIMER, , 0, _PTIMER, _TIME_0);
        test = NVSWITCH_ENG_RD32(device, PTIMER, , 0, _PTIMER, _TIME_1);
    }
    while (hi != test);
    p->timestampNs = (hi << 32) | lo;

    // NOTE(review): p->gpioVal is left unmodified if neither field value
    // matches -- presumably unreachable for a 1-bit field; confirm.
    if (FLD_TEST_DRF(_GPIO, _OUTPUT_CNTL, _IO_OUTPUT, _1, gpioVal))
    {
        p->gpioVal = 1;
    }
    else if (FLD_TEST_DRF(_GPIO, _OUTPUT_CNTL, _IO_OUTPUT, _0, gpioVal))
    {
        p->gpioVal = 0;
    }

    return NVL_SUCCESS;
}

static NvlStatus
nvswitch_cci_reset_and_drain_links_ls10
(
    nvswitch_device *device,
    NvU64 link_mask,
    NvBool bForced
)
{
    NvU32 link;

    // Restrict the request to CCI-managed links only.
    FOR_EACH_INDEX_IN_MASK(64, link, link_mask)
    {
        if (!cciIsLinkManaged(device, link))
        {
            link_mask = link_mask & ~NVBIT64(link);
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return _nvswitch_reset_and_drain_links_ls10(device, link_mask, bForced);
}

/*
 * @brief Set the next LED state
 *        The HW will reflect this state on the next iteration of link
 *        state update.
 */
static void
_nvswitch_set_next_led_state_ls10
(
    nvswitch_device *device,
    NvU8 nextLedState
)
{
    // Cached only; consumed by the next _nvswitch_set_led_state_ls10() call
    device->next_led_state = nextLedState;
}

/*
 * Returns the CPLD register value assigned to a particular LED state
 * confluence page ID: 1011518154
 */
static NvU8
_nvswitch_get_led_state_regval_ls10
(
    nvswitch_device *device,
    NvU8 ledState
)
{
    switch (ledState)
    {
        case ACCESS_LINK_LED_STATE_OFF:
        {
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_OFF;
        }
        case ACCESS_LINK_LED_STATE_UP_WARM:
        {
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_GREEN;
        }
        case ACCESS_LINK_LED_STATE_INITIALIZE:
        {
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_3HZ_AMBER;
        }
        case ACCESS_LINK_LED_STATE_UP_ACTIVE:
        {
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_3HZ_GREEN;
        }
        case ACCESS_LINK_LED_STATE_FAULT:
        {
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_6HZ_AMBER;
        }
        default:
        {
            // Unknown LED state is a programming error; fail safe to OFF
            NVSWITCH_ASSERT(0);
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_OFF;
        }
    }
}

/*
 * @brief Set HW LED state using CPLD write
 *
 * Programs device->next_led_state into the CPLD and, on success, caches
 * it as device->current_led_state.
 */
static NvlStatus
_nvswitch_set_led_state_ls10
(
    nvswitch_device *device
)
{
    NvlStatus retval;
    NvU8 ledState;
    NvU8 nextLedState;
    NvU8 regVal = 0;

    nextLedState = device->next_led_state;
    ledState = REF_VAL(ACCESS_LINK_LED_STATE, nextLedState);

    regVal = FLD_SET_REF_NUM(CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED,
                             _nvswitch_get_led_state_regval_ls10(device, ledState),
                             regVal);

    // Set state for LED
    retval = nvswitch_cci_ports_cpld_write(device, CPLD_MACHXO3_ACCESS_LINK_LED_CTL, regVal);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    // save HW state
    device->current_led_state = REF_NUM(ACCESS_LINK_LED_STATE, ledState);

    return NVL_SUCCESS;
}

/*
 * @brief Check whether any link in linkMask has moved TX/RX data since the
 *        previous call.
 *
 * Compares the current TX+RX throughput counter sum per link against the
 * sum cached in device->tp_counter_previous_sum[] and refreshes the cache.
 * Best effort: returns NV_FALSE on allocation or counter-query failure.
 */
static NvBool
_nvswitch_check_for_link_traffic
(
    nvswitch_device *device,
    NvU64 linkMask
)
{
    NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *pCounterParams = NULL;
    NvU64 *pCounterValues;
    NvU64 tpCounterPreviousSum;
    NvU64 tpCounterCurrentSum;
    NvBool bTraffic = NV_FALSE;
    NvU8 linkNum;

    pCounterParams = nvswitch_os_malloc(sizeof(*pCounterParams));
    if (pCounterParams == NULL)
        goto out;

    pCounterParams->counterMask = NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_TX |
                                  NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_RX;
    pCounterParams->linkMask = linkMask;
    if (nvswitch_ctrl_get_throughput_counters(device,
            pCounterParams) != NVL_SUCCESS)
    {
        goto out;
    }

    // Sum TX/RX traffic for each link
    FOR_EACH_INDEX_IN_MASK(64, linkNum, linkMask)
    {
        pCounterValues = pCounterParams->counters[linkNum].values;

        tpCounterPreviousSum = device->tp_counter_previous_sum[linkNum];

        // Sum taken to save space as it is unlikely to overflow before system is reset
        tpCounterCurrentSum = pCounterValues[NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_TX] +
                              pCounterValues[NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_RX];

        device->tp_counter_previous_sum[linkNum] = tpCounterCurrentSum;

        if (tpCounterCurrentSum > tpCounterPreviousSum)
        {
            bTraffic = NV_TRUE;
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

out:
    // nvswitch_os_free on NULL is harmless on the early-exit path
    nvswitch_os_free(pCounterParams);
    return bTraffic;
}

/*
 * @brief Resolve the LED state to display when two candidate states differ.
 *
 * The ct_asserts below pin the enum ordering so that the numerically
 * smaller (higher priority) state wins: FAULT < OFF < INITIALIZE < UP_WARM.
 */
static NvU8
_nvswitch_resolve_led_state_ls10
(
    nvswitch_device *device,
    NvU8 ledState0,
    NvU8 ledState1
)
{
    // Used to resolve link state discrepancies between partner links
    ct_assert(ACCESS_LINK_LED_STATE_FAULT < ACCESS_LINK_LED_STATE_OFF);
    ct_assert(ACCESS_LINK_LED_STATE_OFF < ACCESS_LINK_LED_STATE_INITIALIZE);
    ct_assert(ACCESS_LINK_LED_STATE_INITIALIZE < ACCESS_LINK_LED_STATE_UP_WARM);

    return (ledState0 < ledState1 ? ledState0 : ledState1);
}

/*
 * @brief Map a single link's DL mode to an LED state.
 *
 * Missing link objects or DL-mode query failures map to OFF.
 */
static NvU8
_nvswitch_get_next_led_state_link_ls10
(
    nvswitch_device *device,
    NvU8 currentLedState,
    NvU8 linkNum
)
{
    nvlink_link *link;
    NvU64 linkState;

    link = nvswitch_get_link(device, linkNum);

    if ((link == NULL) ||
        (device->hal.nvswitch_corelib_get_dl_link_mode(link, &linkState) != NVL_SUCCESS))
    {
        return ACCESS_LINK_LED_STATE_OFF;
    }

    switch (linkState)
    {
        case NVLINK_LINKSTATE_OFF:
        {
            return ACCESS_LINK_LED_STATE_OFF;
        }
        case NVLINK_LINKSTATE_HS:
        case NVLINK_LINKSTATE_RECOVERY:
        case NVLINK_LINKSTATE_SLEEP:
        {
            return ACCESS_LINK_LED_STATE_UP_WARM;
        }
        case NVLINK_LINKSTATE_FAULT:
        {
            return ACCESS_LINK_LED_STATE_FAULT;
        }
        default:
        {
            // Other DL states: stay in INITIALIZE if already there, else OFF
            if (currentLedState == ACCESS_LINK_LED_STATE_INITIALIZE)
            {
                return ACCESS_LINK_LED_STATE_INITIALIZE;
            }
            return ACCESS_LINK_LED_STATE_OFF;
        }
    }
}

/*
 * @brief Fold the per-link LED states of linkMask into one LED state,
 *        upgrading UP_WARM to UP_ACTIVE when traffic is detected.
 */
static NvU8
_nvswitch_get_next_led_state_links_ls10
(
    nvswitch_device *device,
    NvU8 currentLedState,
    NvU64 linkMask
)
{
    NvU8 linkNum;
    NvU8 ledState;
    NvU8 nextLedState;

    // ACCESS_LINK_NUM_LED_STATES acts as the "no state yet" sentinel for
    // the min-fold below
    nextLedState = ACCESS_LINK_NUM_LED_STATES;

    NVSWITCH_ASSERT(linkMask != 0);
    FOR_EACH_INDEX_IN_MASK(64, linkNum, linkMask)
    {
        ledState = _nvswitch_get_next_led_state_link_ls10(device, currentLedState, linkNum);
        nextLedState = _nvswitch_resolve_led_state_ls10(device, nextLedState, ledState);
    }
    FOR_EACH_INDEX_IN_MASK_END;

    if (nextLedState == ACCESS_LINK_LED_STATE_UP_WARM)
    {
        // Only tells us that one of the links has activity
        if (_nvswitch_check_for_link_traffic(device, linkMask))
        {
            nextLedState = ACCESS_LINK_LED_STATE_UP_ACTIVE;
        }
    }

    return nextLedState;
}

/*
 * @brief Compute the next LED state across all enabled, non-CCI-managed
 *        links, encoded into the ACCESS_LINK_LED_STATE field.
 */
static NvU8
_nvswitch_get_next_led_state_ls10
(
    nvswitch_device *device
)
{
    NvU8 linkNum;
    NvU8 ledNextState = 0;
    NvU8 currentLedState;
    NvU64 enabledLinkMask;

    enabledLinkMask = nvswitch_get_enabled_link_mask(device);

    // Exclude CCI-managed links from consideration
    FOR_EACH_INDEX_IN_MASK(64, linkNum, enabledLinkMask)
    {
        if (cciIsLinkManaged(device, linkNum))
        {
            enabledLinkMask = enabledLinkMask & ~NVBIT64(linkNum);
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    currentLedState = device->current_led_state;
    currentLedState = REF_VAL(ACCESS_LINK_LED_STATE, currentLedState);

    ledNextState = FLD_SET_REF_NUM(ACCESS_LINK_LED_STATE,
                                   _nvswitch_get_next_led_state_links_ls10(device,
                                                                           currentLedState,
                                                                           enabledLinkMask),
                                   ledNextState);

    return ledNextState;
}

/*
 * @brief Periodic LED refresh: compute the next LED state, cache it, and
 *        write the CPLD only when the state actually changed.
 */
void
nvswitch_update_link_state_led_ls10
(
    nvswitch_device *device
)
{
    NvU8 currentLedState;
    NvU8 nextLedState;

    currentLedState = device->current_led_state;

    currentLedState = REF_VAL(ACCESS_LINK_LED_STATE, currentLedState);
    nextLedState = _nvswitch_get_next_led_state_ls10(device);

    // This is the next state that the LED will be set to
    _nvswitch_set_next_led_state_ls10(device, nextLedState);

    // Only update HW if required
    if (currentLedState != nextLedState)
    {
        _nvswitch_set_led_state_ls10(device);
    }
}

/*
 * @brief Force the access link LED to OFF on driver shutdown.
 */
void
nvswitch_led_shutdown_ls10
(
    nvswitch_device *device
)
{
    NvU8 ledState = 0;
    ledState = FLD_SET_REF_NUM(ACCESS_LINK_LED_STATE,
                               ACCESS_LINK_LED_STATE_OFF, ledState);

    // This is the next state that the LED will be set to
    _nvswitch_set_next_led_state_ls10(device, ledState);

_nvswitch_set_led_state_ls10(device); 6425 } 6426 6427 NvlStatus 6428 nvswitch_read_vbios_link_entries_ls10 6429 ( 6430 nvswitch_device *device, 6431 NvU32 tblPtr, 6432 NvU32 expected_link_entriesCount, 6433 NVLINK_CONFIG_DATA_LINKENTRY *link_entries, 6434 NvU32 *identified_link_entriesCount 6435 ) 6436 { 6437 NV_STATUS status = NV_ERR_INVALID_PARAMETER; 6438 NvU32 i; 6439 NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_30 vbios_link_entry; 6440 *identified_link_entriesCount = 0; 6441 6442 for (i = 0; i < expected_link_entriesCount; i++) 6443 { 6444 if (!device->bIsNvlinkVbiosTableVersion2) 6445 { 6446 status = device->hal.nvswitch_vbios_read_structure(device, 6447 &vbios_link_entry, 6448 tblPtr, (NvU32 *)0, 6449 NVLINK_CONFIG_DATA_LINKENTRY_FMT_30); 6450 } 6451 else 6452 { 6453 status = device->hal.nvswitch_vbios_read_structure(device, 6454 &vbios_link_entry, 6455 tblPtr, (NvU32 *)0, 6456 NVLINK_CONFIG_DATA_LINKENTRY_FMT_20); 6457 } 6458 if (status != NV_OK) 6459 { 6460 NVSWITCH_PRINT(device, ERROR, 6461 "%s: Error on reading nvlink entry\n", 6462 __FUNCTION__); 6463 return status; 6464 } 6465 link_entries[i].nvLinkparam0 = (NvU8)vbios_link_entry.nvLinkparam0; 6466 link_entries[i].nvLinkparam1 = (NvU8)vbios_link_entry.nvLinkparam1; 6467 link_entries[i].nvLinkparam2 = (NvU8)vbios_link_entry.nvLinkparam2; 6468 link_entries[i].nvLinkparam3 = (NvU8)vbios_link_entry.nvLinkparam3; 6469 link_entries[i].nvLinkparam4 = (NvU8)vbios_link_entry.nvLinkparam4; 6470 link_entries[i].nvLinkparam5 = (NvU8)vbios_link_entry.nvLinkparam5; 6471 link_entries[i].nvLinkparam6 = (NvU8)vbios_link_entry.nvLinkparam6; 6472 if (!device->bIsNvlinkVbiosTableVersion2) 6473 { 6474 link_entries[i].nvLinkparam7 = (NvU8)vbios_link_entry.nvLinkparam7; 6475 link_entries[i].nvLinkparam8 = (NvU8)vbios_link_entry.nvLinkparam8; 6476 link_entries[i].nvLinkparam9 = (NvU8)vbios_link_entry.nvLinkparam9; 6477 } 6478 if (!device->bIsNvlinkVbiosTableVersion2) 6479 tblPtr += 
(sizeof(NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_30)/sizeof(NvU32)); 6480 else 6481 tblPtr += (sizeof(NVLINK_VBIOS_CONFIG_DATA_LINKENTRY_20)/sizeof(NvU32)); 6482 6483 6484 NVSWITCH_PRINT(device, NOISY, 6485 "<<<---- NvLink ID 0x%x ---->>>\n", i); 6486 NVSWITCH_PRINT(device, NOISY, 6487 "NVLink Params 0 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam0, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam0)); 6488 NVSWITCH_PRINT(device, NOISY, 6489 "NVLink Params 1 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam1, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam1)); 6490 NVSWITCH_PRINT(device, NOISY, 6491 "NVLink Params 2 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam2, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam2)); 6492 NVSWITCH_PRINT(device, NOISY, 6493 "NVLink Params 3 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam3, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam3)); 6494 NVSWITCH_PRINT(device, NOISY, 6495 "NVLink Params 4 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam4, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam4)); 6496 NVSWITCH_PRINT(device, NOISY, 6497 "NVLink Params 5 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam5, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam5)); 6498 NVSWITCH_PRINT(device, NOISY, 6499 "NVLink Params 6 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam6, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam6)); 6500 NVSWITCH_PRINT(device, NOISY, 6501 "NVLink Params 7 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam7, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam7)); 6502 NVSWITCH_PRINT(device, NOISY, 6503 "NVLink Params 8 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam8, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam8)); 6504 NVSWITCH_PRINT(device, NOISY, 6505 "NVLink Params 9 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", 
vbios_link_entry.nvLinkparam9, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam9)); 6506 NVSWITCH_PRINT(device, NOISY, 6507 "<<<---- NvLink ID 0x%x ---->>>\n\n", i); 6508 } 6509 *identified_link_entriesCount = i; 6510 return status; 6511 } 6512 6513 NvlStatus 6514 nvswitch_ctrl_get_bios_info_ls10 6515 ( 6516 nvswitch_device *device, 6517 NVSWITCH_GET_BIOS_INFO_PARAMS *p 6518 ) 6519 { 6520 NvU32 biosVersionBytes; 6521 NvU32 biosOemVersionBytes; 6522 NvU32 biosMagic = 0x9610; 6523 6524 // 6525 // Example: 96.10.09.00.00 is the formatted version string 6526 // | | | 6527 // | | |__ BIOS OEM version byte 6528 // | | 6529 // |_________|_____ BIOS version bytes 6530 // 6531 biosVersionBytes = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW_SW, _BIOS_VERSION); 6532 biosOemVersionBytes = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW_SW, _OEM_BIOS_VERSION); 6533 6534 // 6535 // LS10 is built out of core96 and the BIOS version will always begin with 6536 // 96.10.xx.xx.xx 6537 // 6538 if ((biosVersionBytes >> 16) != biosMagic) 6539 { 6540 NVSWITCH_PRINT(device, ERROR, 6541 "BIOS version not found in scratch register\n"); 6542 return -NVL_ERR_INVALID_STATE; 6543 } 6544 6545 p->version = (((NvU64)biosVersionBytes) << 8) | (biosOemVersionBytes & 0xff); 6546 6547 return NVL_SUCCESS; 6548 } 6549 6550 NvlStatus 6551 nvswitch_ctrl_get_inforom_version_ls10 6552 ( 6553 nvswitch_device *device, 6554 NVSWITCH_GET_INFOROM_VERSION_PARAMS *p 6555 ) 6556 { 6557 struct inforom *pInforom = device->pInforom; 6558 6559 if ((pInforom == NULL) || (!pInforom->IMG.bValid)) 6560 { 6561 return -NVL_ERR_NOT_SUPPORTED; 6562 } 6563 6564 if (NV_ARRAY_ELEMENTS(pInforom->IMG.object.version) < 6565 NVSWITCH_INFOROM_VERSION_LEN) 6566 { 6567 NVSWITCH_PRINT(device, ERROR, 6568 "Inforom IMG object struct smaller than expected\n"); 6569 return -NVL_ERR_INVALID_STATE; 6570 } 6571 6572 nvswitch_inforom_string_copy(pInforom->IMG.object.version, p->version, 6573 NVSWITCH_INFOROM_VERSION_LEN); 6574 6575 return NVL_SUCCESS; 6576 } 6577 
/*
 * @Brief : Initializes an NvSwitch hardware state
 *
 * @Description :
 *
 * @param[in] device        a reference to the device to initialize
 *
 * @returns                 NVL_SUCCESS if the action succeeded
 *                          -NVL_BAD_ARGS if bad arguments provided
 *                          -NVL_PCI_ERROR if bar info unable to be retrieved
 */
NvlStatus
nvswitch_initialize_device_state_ls10
(
    nvswitch_device *device
)
{
    // LS10 enables continuous ALI mode, then shares the LR10 init sequence
    device->bModeContinuousALI = NV_TRUE;
    return nvswitch_initialize_device_state_lr10(device);
}

//
// This function auto creates the ls10 HAL connectivity from the NVSWITCH_INIT_HAL
// macro in haldef_nvswitch.h
//
// Note: All hal fns must be implemented for each chip.
//       There is no automatic stubbing here.
//
void nvswitch_setup_hal_ls10(nvswitch_device *device)
{
    device->chip_arch = NVSWITCH_GET_INFO_INDEX_ARCH_LS10;
    device->chip_impl = NVSWITCH_GET_INFO_INDEX_IMPL_LS10;

    // Common HAL wiring first, then the LS10-specific additions
    NVSWITCH_INIT_HAL(device, ls10);
    NVSWITCH_INIT_HAL_LS10(device, ls10);
}
