1 /* 2 * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 * SPDX-License-Identifier: MIT 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 */ 23 24 #include "kernel/gpu/fifo/kernel_fifo.h" 25 #include "kernel/gpu/fifo/kernel_channel_group_api.h" 26 #include "kernel/gpu/fifo/kernel_channel_group.h" 27 #include "kernel/gpu/ce/kernel_ce_shared.h" 28 #include "kernel/gpu/mig_mgr/kernel_mig_manager.h" 29 #include "kernel/gpu/bus/kern_bus.h" 30 31 #include "vgpu/vgpu_events.h" 32 33 #include "published/ampere/ga100/dev_fault.h" 34 #include "published/ampere/ga100/dev_ram.h" 35 #include "published/ampere/ga100/dev_ctrl.h" 36 37 38 NV_STATUS 39 kfifoEngineInfoXlate_GA100 40 ( 41 OBJGPU *pGpu, 42 KernelFifo *pKernelFifo, 43 ENGINE_INFO_TYPE inType, 44 NvU32 inVal, 45 ENGINE_INFO_TYPE outType, 46 NvU32 *pOutVal 47 ) 48 { 49 KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); 50 51 // We no longer store ENGINE_INFO_TYPE_INTR on Ampere+ (bug 24110055) 52 if (inType == ENGINE_INFO_TYPE_INTR || outType == ENGINE_INFO_TYPE_INTR) 53 { 54 return NV_ERR_NOT_SUPPORTED; 55 } 56 57 // 58 // We need extra logic for translation when SMC is enabled and input or output is 59 // MMU_FAULT_ID because device Info cannot translate MMU_FAULT_ID to/from any type for GR > 0 60 // 61 if (IS_MIG_IN_USE(pGpu)) 62 { 63 NvU32 baseGrFaultId; 64 NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_GV100(pGpu, pKernelFifo, 65 ENGINE_INFO_TYPE_ENG_DESC, ENG_GR(0), 66 ENGINE_INFO_TYPE_MMU_FAULT_ID, &baseGrFaultId)); 67 68 if (inType == ENGINE_INFO_TYPE_MMU_FAULT_ID) 69 { 70 NvU32 subctxId, grIdx; 71 NvU32 maxSubctx = kfifoGetMaxSubcontext_HAL(pGpu, pKernelFifo, NV_FALSE); 72 73 // check if input fault ID corresponds to GR 74 if ((inVal >= baseGrFaultId) && (inVal < (baseGrFaultId + maxSubctx))) 75 { 76 subctxId = inVal - baseGrFaultId; 77 NV_ASSERT_OK_OR_RETURN(kgrmgrGetGrIdxForVeid(pGpu, pKernelGraphicsManager, subctxId, &grIdx)); 78 inVal = RM_ENGINE_TYPE_GR(grIdx); 79 inType = ENGINE_INFO_TYPE_RM_ENGINE_TYPE; 80 } 81 } 82 83 if (outType == ENGINE_INFO_TYPE_MMU_FAULT_ID) 84 { 85 NvU32 grIdx, startSubctxId; 86 
NV_STATUS status; 87 RM_ENGINE_TYPE rmEngineType; 88 89 status = kfifoEngineInfoXlate_GV100(pGpu, pKernelFifo, inType, inVal, 90 ENGINE_INFO_TYPE_RM_ENGINE_TYPE, (NvU32 *)&rmEngineType); 91 if (status != NV_OK) 92 return status; 93 94 // check if rmEngineType corresponding to input is GR 95 if (RM_ENGINE_TYPE_IS_GR(rmEngineType)) 96 { 97 grIdx = RM_ENGINE_TYPE_GR_IDX(rmEngineType); 98 NV_ASSERT_OK_OR_RETURN(kgrmgrGetVeidBaseForGrIdx(pGpu, pKernelGraphicsManager, grIdx, &startSubctxId)); 99 *pOutVal = baseGrFaultId + startSubctxId; 100 return NV_OK; 101 } 102 } 103 } 104 105 return kfifoEngineInfoXlate_GV100(pGpu, pKernelFifo, inType, inVal, outType, pOutVal); 106 } 107 108 109 /*! 110 * @brief Get the local maximum number of subctx allowed in this TSG 111 * 112 * @param pGpu 113 * @param pKernelFifo 114 * @param[in] pKernelChannelGroup 115 * @param[in] bLegacyMode Is TSG in legacy mode. 116 */ 117 NvU32 118 kfifoChannelGroupGetLocalMaxSubcontext_GA100 119 ( 120 OBJGPU *pGpu, 121 KernelFifo *pKernelFifo, 122 KernelChannelGroup *pKernelChannelGroup, 123 NvBool bLegacyMode 124 ) 125 { 126 KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); 127 128 NV_ASSERT_OR_RETURN(pKernelChannelGroup != NULL, NV_ERR_INVALID_ARGUMENT); 129 130 if (IS_MIG_IN_USE(pGpu) && !bLegacyMode && 131 RM_ENGINE_TYPE_IS_GR(pKernelChannelGroup->engineType)) 132 { 133 NvU32 grIdx = RM_ENGINE_TYPE_GR_IDX(pKernelChannelGroup->engineType); 134 return nvPopCount64(kgrmgrGetGrIdxVeidMask(pGpu, pKernelGraphicsManager, grIdx)); 135 } 136 137 // In SMC-Legacy mode, revert to pre-Ampere behavior 138 return kfifoChannelGroupGetLocalMaxSubcontext_GM107(pGpu, pKernelFifo, 139 pKernelChannelGroup, 140 bLegacyMode); 141 } 142 143 /*! 144 * @brief Update the usermode doorbell register with work submit token to notify 145 * host that work is available on this channel. 
146 * 147 * @param[in] pGpu 148 * @param[in] pKernelFifo 149 * @param[in] workSubmitToken Token to update the doorbell with 150 * @param[in] runlistId Runlist ID 151 */ 152 NV_STATUS 153 kfifoUpdateUsermodeDoorbell_GA100 154 ( 155 OBJGPU *pGpu, 156 KernelFifo *pKernelFifo, 157 NvU32 workSubmitToken, 158 NvU32 runlistId 159 ) 160 { 161 // 162 // Updating the usermode doorbell is different for CPU vs. GSP. 163 // 164 if (!RMCFG_FEATURE_PLATFORM_GSP) 165 { 166 return kfifoUpdateUsermodeDoorbell_TU102(pGpu, pKernelFifo, workSubmitToken, runlistId); 167 } 168 else 169 { 170 return kfifoUpdateInternalDoorbellForUsermode_HAL(pGpu, pKernelFifo, workSubmitToken, runlistId); 171 } 172 173 return NV_OK; 174 } 175 176 /*! 177 * @brief Construct the worksubmit token. Caller cannot make assumption about this handle. 178 * 179 * @param[in] pGpu 180 * @param[in] pKernelFifo 181 * @param[in] pKernelChannel 182 * @param[out] pGeneratedToken Store the generated token 183 * @param[in] bUsedForHost Used on Host RM 184 * 185 */ 186 NV_STATUS 187 kfifoGenerateWorkSubmitToken_GA100 188 ( 189 OBJGPU *pGpu, 190 KernelFifo *pKernelFifo, 191 KernelChannel *pKernelChannel, 192 NvU32 *pGeneratedToken, 193 NvBool bUsedForHost 194 ) 195 { 196 NvU32 chId; 197 NvU32 gfId; 198 NvU32 runlistId; 199 NvU32 val = 0; 200 201 NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL); 202 203 NV_ASSERT_OR_RETURN(pGeneratedToken != NULL, NV_ERR_INVALID_OBJECT); 204 NV_ASSERT_OR_RETURN((pKernelChannel->pKernelChannelGroupApi != NULL) && 205 (pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup != NULL), 206 NV_ERR_INVALID_STATE); 207 208 chId = pKernelChannel->ChID; 209 210 NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfId)); 211 212 if (!RMCFG_FEATURE_PLATFORM_GSP || (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && IS_GFID_VF(gfId))) 213 { 214 215 // TODO: Remove check on Ampere. Bug 200606706. 
216 if (!bUsedForHost && IS_GFID_VF(gfId)) 217 { 218 NvU32 vChId; 219 220 NV_ASSERT_OK_OR_RETURN(kfifoGetVChIdForSChId_HAL(pGpu, pKernelFifo, 221 chId, gfId, 222 kchannelGetEngineType(pKernelChannel), 223 &vChId)); 224 chId = vChId; 225 } 226 227 // TODO: Remove, on Ampere channels should be set to a valid runlist before allocation. Bug 200606706. 228 if (!kchannelIsRunlistSet(pGpu, pKernelChannel)) 229 { 230 NV_PRINTF(LEVEL_NOTICE, 231 "FAILED Channel 0x%x is not assigned to runlist yet\n", 232 chId); 233 return NV_ERR_INVALID_STATE; 234 } 235 236 runlistId = kchannelGetRunlistId(pKernelChannel); 237 238 // Here we construct token to be a concatenation of runlist id and channel id 239 val = FLD_SET_DRF_NUM(_CTRL, _VF_DOORBELL, _RUNLIST_ID, runlistId, val); 240 val = FLD_SET_DRF_NUM(_CTRL, _VF_DOORBELL, _VECTOR, chId, val); 241 242 NV_PRINTF(LEVEL_INFO, 243 "Generated workSubmitToken 0x%x for channel 0x%x runlist 0x%x\n", 244 val, chId, runlistId); 245 } 246 else // RMCFG_FEATURE_PLATFORM_GSP 247 { 248 NV_ASSERT_OK_OR_RETURN(kfifoGenerateInternalWorkSubmitToken_HAL(pGpu, pKernelFifo, pKernelChannel)); 249 } 250 251 *pGeneratedToken = val; 252 253 return NV_OK; 254 } 255 256 /** 257 * @brief Get the runlist base shift amount 258 * 259 * @param pKernelFifo 260 * 261 * @return shift amount 262 */ 263 NvU32 264 kfifoRunlistGetBaseShift_GA100 265 ( 266 KernelFifo *pKernelFifo 267 ) 268 { 269 return NV_RAMRL_ENTRY_BASE_SHIFT; 270 } 271 272 /*! 273 * Special function to be used early when the CHID_MGRs aren't and cannot be 274 * constructed in all cases. 
Do not use otherwise 275 */ 276 NvU32 277 kfifoGetMaxCeChannelGroups_GA100 278 ( 279 OBJGPU *pGpu, 280 KernelFifo *pKernelFifo 281 ) 282 { 283 ENGDESCRIPTOR eng = 0; 284 NvU32 deviceIndex; 285 const ENGINE_INFO *pEngineInfo = kfifoGetEngineInfo(pKernelFifo); 286 NvU32 maxCeChannels = 0; 287 288 // If called before kfifoConstructEngineList has executed 289 if (pEngineInfo == NULL) 290 return 0; 291 292 // 293 // We can't use chidMgr here as this gets called before chidMgr is completely initialized 294 // Use device info table instead 295 // 296 for (deviceIndex = 0; deviceIndex < pEngineInfo->engineInfoListSize; deviceIndex++) 297 { 298 eng = pEngineInfo->engineInfoList[deviceIndex].engineData[ENGINE_INFO_TYPE_ENG_DESC]; 299 300 // All GR CE use the same pool as GR 301 if ((eng == ENG_GR(0)) || 302 (IS_CE(eng) && 303 (!ceIsCeGrce(pGpu, pEngineInfo->engineInfoList[deviceIndex].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE])))) 304 { 305 maxCeChannels += kfifoRunlistQueryNumChannels_HAL(pGpu, pKernelFifo, 0); 306 } 307 } 308 309 // override max channels if we can run out of BAR2 page tables 310 if (kbusIsBug2751296LimitBar2PtSize(GPU_GET_KERNEL_BUS(pGpu))) 311 { 312 // 2k for GR CE and 2k for the rest 313 maxCeChannels = 4096; 314 } 315 316 return maxCeChannels; 317 } 318 319 /*! 320 * @brief Provides PBDMA Fault IDs for requested engines 321 * 322 * @param[IN] pGpu Pointer to OBJGPU instance 323 * @param[IN] pKernelFifo Pointer to KernelFifo instance 324 * @param[IN] type Engine_info_type to determine if en engine need to be searched 325 * or not. 
326 * @param[IN] val Engine value associated with the requested INFO_TYPE 327 * @param[OUT] ppPbdmaFaultIds Buffer to fill output PBDMA_FAULT_IDS 328 * @param[OUT] pNumPbdmas Number of PBDMAs or valid buffer entries in above buffer 329 * 330 */ 331 NV_STATUS 332 kfifoGetEnginePbdmaFaultIds_GA100 333 ( 334 OBJGPU *pGpu, 335 KernelFifo *pKernelFifo, 336 ENGINE_INFO_TYPE type, 337 NvU32 val, 338 NvU32 **ppPbdmaFaultIds, 339 NvU32 *pNumPbdmas 340 ) 341 { 342 const ENGINE_INFO *pEngineInfo = kfifoGetEngineInfo(pKernelFifo); 343 NvU32 i; 344 345 *pNumPbdmas = 0; 346 347 if (pEngineInfo == NULL) 348 { 349 NV_ASSERT_OK_OR_RETURN(kfifoConstructEngineList_HAL(pGpu, pKernelFifo)); 350 351 pEngineInfo = kfifoGetEngineInfo(pKernelFifo); 352 NV_ASSERT_OR_RETURN(pEngineInfo != NULL, NV_ERR_INVALID_STATE); 353 } 354 355 if (type == ENGINE_INFO_TYPE_INVALID) 356 { 357 NV_ASSERT_OR_RETURN(val < pEngineInfo->engineInfoListSize, NV_ERR_INVALID_ARGUMENT); 358 *ppPbdmaFaultIds = pEngineInfo->engineInfoList[val].pbdmaFaultIds; 359 *pNumPbdmas = pEngineInfo->engineInfoList[val].numPbdmas; 360 return NV_OK; 361 } 362 363 for (i = 0; i < pEngineInfo->engineInfoListSize; i++) 364 { 365 if (pEngineInfo->engineInfoList[i].engineData[type] == val) 366 { 367 *ppPbdmaFaultIds = pEngineInfo->engineInfoList[i].pbdmaFaultIds; 368 *pNumPbdmas = pEngineInfo->engineInfoList[i].numPbdmas; 369 return NV_OK; 370 } 371 } 372 373 return NV_ERR_OBJECT_NOT_FOUND; 374 } 375 376 /** 377 * @brief Get the number of PBDMAs 378 * 379 * @param pGpu OBJGPU pointer 380 * @param pKernelFifo KernelFifo pointer 381 */ 382 NvU32 383 kfifoGetNumPBDMAs_GA100 384 ( 385 OBJGPU *pGpu, 386 KernelFifo *pKernelFifo 387 ) 388 { 389 const ENGINE_INFO *pEngineInfo = kfifoGetEngineInfo(pKernelFifo); 390 391 NV_PRINTF(LEVEL_INFO, "%d PBDMAs\n", pEngineInfo->maxNumPbdmas); 392 NV_ASSERT(pEngineInfo->maxNumPbdmas != 0); 393 394 return pEngineInfo->maxNumPbdmas; 395 } 396 397 /** 398 * @brief Convert PBDMA ID to string 399 * @param[in] 
pGpu 400 * @param[in] pKernelFifo 401 * @param[in] pbdmaId 402 * 403 * @return cont string 404 */ 405 const char* 406 kfifoPrintPbdmaId_GA100 407 ( 408 OBJGPU *pGpu, 409 KernelFifo *pKernelFifo, 410 NvU32 pbdmaId 411 ) 412 { 413 NV_ASSERT_OR_RETURN(pbdmaId < kfifoGetNumPBDMAs_HAL(pGpu, pKernelFifo), "UNKNOWN"); 414 static const char* pbdmaIdString[64] = { "HOST0" , "HOST1" , "HOST2" , "HOST3" , "HOST4" , "HOST5" , "HOST6" , "HOST7" , 415 "HOST8" , "HOST9" , "HOST10", "HOST11", "HOST12", "HOST13", "HOST14", "HOST15", 416 "HOST16", "HOST17", "HOST18", "HOST19", "HOST20", "HOST21", "HOST22", "HOST23", 417 "HOST24", "HOST25", "HOST26", "HOST27", "HOST28", "HOST29", "HOST30", "HOST31", 418 "HOST32", "HOST33", "HOST34", "HOST35", "HOST36", "HOST37", "HOST38", "HOST39", 419 "HOST40", "HOST41", "HOST42", "HOST43", "HOST44", "HOST45", "HOST46", "HOST47", 420 "HOST48", "HOST49", "HOST50", "HOST51", "HOST52", "HOST53", "HOST54", "HOST55", 421 "HOST56", "HOST57", "HOST58", "HOST59", "HOST60", "HOST61", "HOST62", "HOST63"}; 422 423 return pbdmaIdString[pbdmaId]; 424 } 425 426 /*! 427 * @brief Converts a mmu engine id (NV_PFAULT_MMU_ENG_ID_*) into a string. 
428 * 429 * @param[in] pGpu 430 * @param[in] pKernelFifo 431 * @param[in] engineID NV_PFAULT_MMU_ENG_ID_* 432 * 433 * @returns a string (always non-null) 434 */ 435 const char* 436 kfifoPrintInternalEngine_GA100 437 ( 438 OBJGPU *pGpu, 439 KernelFifo *pKernelFifo, 440 NvU32 engineID 441 ) 442 { 443 NV_STATUS status = NV_OK; 444 KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); 445 NvU32 pbdmaId; 446 NvU32 engTag; 447 448 if (kfifoIsMmuFaultEngineIdPbdma(pGpu, pKernelFifo, engineID)) 449 { 450 NV_ASSERT_OR_RETURN(kfifoGetPbdmaIdFromMmuFaultId(pGpu, pKernelFifo, engineID, &pbdmaId) == NV_OK, "UNKNOWN"); 451 return kfifoPrintPbdmaId_HAL(pGpu, pKernelFifo, pbdmaId); 452 } 453 454 if (kgmmuIsFaultEngineBar1_HAL(pKernelGmmu, engineID)) 455 { 456 return "BAR1"; 457 } 458 else if (kgmmuIsFaultEngineBar2_HAL(pKernelGmmu, engineID)) 459 { 460 return "BAR2"; 461 } 462 463 switch (engineID) 464 { 465 case NV_PFAULT_MMU_ENG_ID_DISPLAY: 466 return "DISPLAY"; 467 case NV_PFAULT_MMU_ENG_ID_IFB: 468 return "IFB"; 469 case NV_PFAULT_MMU_ENG_ID_SEC: 470 return "SEC"; 471 case NV_PFAULT_MMU_ENG_ID_PERF: 472 return "PERF"; 473 case NV_PFAULT_MMU_ENG_ID_NVDEC0: 474 return "NVDEC0"; 475 case NV_PFAULT_MMU_ENG_ID_NVDEC1: 476 return "NVDEC1"; 477 case NV_PFAULT_MMU_ENG_ID_NVDEC2: 478 return "NVDEC2"; 479 case NV_PFAULT_MMU_ENG_ID_NVDEC3: 480 return "NVDEC3"; 481 case NV_PFAULT_MMU_ENG_ID_CE0: 482 return "CE0"; 483 case NV_PFAULT_MMU_ENG_ID_CE1: 484 return "CE1"; 485 case NV_PFAULT_MMU_ENG_ID_CE2: 486 return "CE2"; 487 case NV_PFAULT_MMU_ENG_ID_CE3: 488 return "CE3"; 489 case NV_PFAULT_MMU_ENG_ID_CE4: 490 return "CE4"; 491 case NV_PFAULT_MMU_ENG_ID_CE5: 492 return "CE5"; 493 case NV_PFAULT_MMU_ENG_ID_CE6: 494 return "CE6"; 495 case NV_PFAULT_MMU_ENG_ID_CE7: 496 return "CE7"; 497 case NV_PFAULT_MMU_ENG_ID_CE8: 498 return "CE8"; 499 case NV_PFAULT_MMU_ENG_ID_CE9: 500 return "CE9"; 501 case NV_PFAULT_MMU_ENG_ID_PWR_PMU: 502 return "PMU"; 503 case NV_PFAULT_MMU_ENG_ID_PTP: 504 return "PTP"; 
505 case NV_PFAULT_MMU_ENG_ID_NVENC1: 506 return "NVENC1"; 507 case NV_PFAULT_MMU_ENG_ID_PHYSICAL: 508 return "PHYSICAL"; 509 case NV_PFAULT_MMU_ENG_ID_NVJPG0: 510 return "NVJPG"; 511 case NV_PFAULT_MMU_ENG_ID_OFA0: 512 return "OFA"; 513 case NV_PFAULT_MMU_ENG_ID_FLA: 514 return "FLA"; 515 default: 516 { 517 const char *engine = 518 kfifoPrintInternalEngineCheck_HAL(pGpu, pKernelFifo, engineID); 519 if (engine != NULL) 520 { 521 return engine; 522 } 523 } 524 } 525 526 status = kfifoEngineInfoXlate_HAL(pGpu, 527 pKernelFifo, 528 ENGINE_INFO_TYPE_MMU_FAULT_ID, 529 engineID, 530 ENGINE_INFO_TYPE_ENG_DESC, 531 &engTag); 532 if ((NV_OK == status) && IS_GR(engTag)) 533 { 534 switch (engTag) 535 { 536 case ENG_GR(0): 537 return "GRAPHICS"; 538 case ENG_GR(1): 539 return "GR1"; 540 case ENG_GR(2): 541 return "GR2"; 542 case ENG_GR(3): 543 return "GR3"; 544 case ENG_GR(4): 545 return "GR4"; 546 case ENG_GR(5): 547 return "GR5"; 548 case ENG_GR(6): 549 return "GR6"; 550 case ENG_GR(7): 551 return "GR7"; 552 } 553 } 554 555 return "UNKNOWN"; 556 } 557 558 /*! 559 * @brief Converts a mmu engine id (NV_PFAULT_MMU_ENG_ID_*) into a string. 
560 * 561 * @param[in] pGpu 562 * @param[in] pKernelFifo 563 * @param[in] engineID NV_PFAULT_MMU_ENG_ID_* 564 * 565 * @returns a string 566 */ 567 const char* 568 kfifoPrintInternalEngineCheck_GA100 569 ( 570 OBJGPU *pGpu, 571 KernelFifo *pKernelFifo, 572 NvU32 engineID 573 ) 574 { 575 switch (engineID) 576 { 577 case NV_PFAULT_MMU_ENG_ID_NVDEC4: 578 return "NVDEC4"; 579 case NV_PFAULT_MMU_ENG_ID_NVENC0: 580 return "NVENC0"; 581 default: 582 return NULL; 583 } 584 } 585 586 /** 587 * @brief Converts a subid/clientid into a client string 588 * 589 * @param[in] pGpu 590 * @param[in] pKernelFifo 591 * @param[in] pMmuExceptData 592 593 * @returns a string (always non-null) 594 */ 595 const char* 596 kfifoGetClientIdString_GA100 597 ( 598 OBJGPU *pGpu, 599 KernelFifo *pKernelFifo, 600 FIFO_MMU_EXCEPTION_DATA *pMmuExceptInfo 601 ) 602 { 603 if (pMmuExceptInfo->bGpc) 604 { 605 switch (pMmuExceptInfo->clientId) 606 { 607 case NV_PFAULT_CLIENT_GPC_T1_0: 608 return "GPCCLIENT_T1_0"; 609 case NV_PFAULT_CLIENT_GPC_T1_1: 610 return "GPCCLIENT_T1_1"; 611 case NV_PFAULT_CLIENT_GPC_T1_2: 612 return "GPCCLIENT_T1_2"; 613 case NV_PFAULT_CLIENT_GPC_T1_3: 614 return "GPCCLIENT_T1_3"; 615 case NV_PFAULT_CLIENT_GPC_T1_4: 616 return "GPCCLIENT_T1_4"; 617 case NV_PFAULT_CLIENT_GPC_T1_5: 618 return "GPCCLIENT_T1_5"; 619 case NV_PFAULT_CLIENT_GPC_T1_6: 620 return "GPCCLIENT_T1_6"; 621 case NV_PFAULT_CLIENT_GPC_T1_7: 622 return "GPCCLIENT_T1_7"; 623 case NV_PFAULT_CLIENT_GPC_PE_0: 624 return "GPCCLIENT_PE_0"; 625 case NV_PFAULT_CLIENT_GPC_PE_1: 626 return "GPCCLIENT_PE_1"; 627 case NV_PFAULT_CLIENT_GPC_PE_2: 628 return "GPCCLIENT_PE_2"; 629 case NV_PFAULT_CLIENT_GPC_PE_3: 630 return "GPCCLIENT_PE_3"; 631 case NV_PFAULT_CLIENT_GPC_PE_4: 632 return "GPCCLIENT_PE_4"; 633 case NV_PFAULT_CLIENT_GPC_PE_5: 634 return "GPCCLIENT_PE_5"; 635 case NV_PFAULT_CLIENT_GPC_PE_6: 636 return "GPCCLIENT_PE_6"; 637 case NV_PFAULT_CLIENT_GPC_PE_7: 638 return "GPCCLIENT_PE_7"; 639 case NV_PFAULT_CLIENT_GPC_RAST: 
640 return "GPCCLIENT_RAST"; 641 case NV_PFAULT_CLIENT_GPC_GCC: 642 return "GPCCLIENT_GCC"; 643 case NV_PFAULT_CLIENT_GPC_GPCCS: 644 return "GPCCLIENT_GPCCS"; 645 case NV_PFAULT_CLIENT_GPC_PROP_0: 646 return "GPCCLIENT_PROP_0"; 647 case NV_PFAULT_CLIENT_GPC_PROP_1: 648 return "GPCCLIENT_PROP_1"; 649 case NV_PFAULT_CLIENT_GPC_T1_8: 650 return "GPCCLIENT_T1_8"; 651 case NV_PFAULT_CLIENT_GPC_T1_9: 652 return "GPCCLIENT_T1_9"; 653 case NV_PFAULT_CLIENT_GPC_T1_10: 654 return "GPCCLIENT_T1_10"; 655 case NV_PFAULT_CLIENT_GPC_T1_11: 656 return "GPCCLIENT_T1_11"; 657 case NV_PFAULT_CLIENT_GPC_T1_12: 658 return "GPCCLIENT_T1_12"; 659 case NV_PFAULT_CLIENT_GPC_T1_13: 660 return "GPCCLIENT_T1_13"; 661 case NV_PFAULT_CLIENT_GPC_T1_14: 662 return "GPCCLIENT_T1_14"; 663 case NV_PFAULT_CLIENT_GPC_T1_15: 664 return "GPCCLIENT_T1_15"; 665 case NV_PFAULT_CLIENT_GPC_TPCCS_0: 666 return "GPCCLIENT_TPCCS_0"; 667 case NV_PFAULT_CLIENT_GPC_TPCCS_1: 668 return "GPCCLIENT_TPCCS_1"; 669 case NV_PFAULT_CLIENT_GPC_TPCCS_2: 670 return "GPCCLIENT_TPCCS_2"; 671 case NV_PFAULT_CLIENT_GPC_TPCCS_3: 672 return "GPCCLIENT_TPCCS_3"; 673 case NV_PFAULT_CLIENT_GPC_TPCCS_4: 674 return "GPCCLIENT_TPCCS_4"; 675 case NV_PFAULT_CLIENT_GPC_TPCCS_5: 676 return "GPCCLIENT_TPCCS_5"; 677 case NV_PFAULT_CLIENT_GPC_TPCCS_6: 678 return "GPCCLIENT_TPCCS_6"; 679 case NV_PFAULT_CLIENT_GPC_TPCCS_7: 680 return "GPCCLIENT_TPCCS_7"; 681 case NV_PFAULT_CLIENT_GPC_PE_8: 682 return "GPCCLIENT_PE_8"; 683 case NV_PFAULT_CLIENT_GPC_TPCCS_8: 684 return "GPCCLIENT_TPCCS_8"; 685 case NV_PFAULT_CLIENT_GPC_T1_16: 686 return "GPCCLIENT_T1_16"; 687 case NV_PFAULT_CLIENT_GPC_T1_17: 688 return "GPCCLIENT_T1_17"; 689 case NV_PFAULT_CLIENT_GPC_ROP_0: 690 return "GPCCLIENT_ROP_0"; 691 case NV_PFAULT_CLIENT_GPC_ROP_1: 692 return "GPCCLIENT_ROP_1"; 693 case NV_PFAULT_CLIENT_GPC_GPM: 694 return "GPCCLIENT_GPM"; 695 default: 696 return "UNRECOGNIZED_CLIENT"; 697 } 698 } 699 else 700 { 701 switch (pMmuExceptInfo->clientId) 702 { 703 case 
NV_PFAULT_CLIENT_HUB_VIP: 704 return "HUBCLIENT_VIP"; 705 case NV_PFAULT_CLIENT_HUB_CE0: 706 return "HUBCLIENT_CE0"; 707 case NV_PFAULT_CLIENT_HUB_CE1: 708 return "HUBCLIENT_CE1"; 709 case NV_PFAULT_CLIENT_HUB_DNISO: 710 return "HUBCLIENT_DNISO"; 711 case NV_PFAULT_CLIENT_HUB_FE: 712 return "HUBCLIENT_FE"; 713 case NV_PFAULT_CLIENT_HUB_FECS: 714 return "HUBCLIENT_FECS"; 715 case NV_PFAULT_CLIENT_HUB_HOST: 716 return "HUBCLIENT_HOST"; 717 case NV_PFAULT_CLIENT_HUB_HOST_CPU: 718 return "HUBCLIENT_HOST_CPU"; 719 case NV_PFAULT_CLIENT_HUB_HOST_CPU_NB: 720 return "HUBCLIENT_HOST_CPU_NB"; 721 case NV_PFAULT_CLIENT_HUB_ISO: 722 return "HUBCLIENT_ISO"; 723 case NV_PFAULT_CLIENT_HUB_MMU: 724 return "HUBCLIENT_MMU"; 725 case NV_PFAULT_CLIENT_HUB_NVDEC0: 726 return "HUBCLIENT_NVDEC0"; 727 case NV_PFAULT_CLIENT_HUB_NVENC1: 728 return "HUBCLIENT_NVENC1"; 729 case NV_PFAULT_CLIENT_HUB_NISO: 730 return "HUBCLIENT_NISO"; 731 case NV_PFAULT_CLIENT_HUB_P2P: 732 return "HUBCLIENT_P2P"; 733 case NV_PFAULT_CLIENT_HUB_PD: 734 return "HUBCLIENT_PD"; 735 case NV_PFAULT_CLIENT_HUB_PERF0: 736 return "HUBCLIENT_PERF"; 737 case NV_PFAULT_CLIENT_HUB_PMU: 738 return "HUBCLIENT_PMU"; 739 case NV_PFAULT_CLIENT_HUB_RASTERTWOD: 740 return "HUBCLIENT_RASTERTWOD"; 741 case NV_PFAULT_CLIENT_HUB_SCC: 742 return "HUBCLIENT_SCC"; 743 case NV_PFAULT_CLIENT_HUB_SCC_NB: 744 return "HUBCLIENT_SCC_NB"; 745 case NV_PFAULT_CLIENT_HUB_SEC: 746 return "HUBCLIENT_SEC"; 747 case NV_PFAULT_CLIENT_HUB_SSYNC: 748 return "HUBCLIENT_SSYNC"; 749 case NV_PFAULT_CLIENT_HUB_CE2: 750 return "HUBCLIENT_CE2"; 751 case NV_PFAULT_CLIENT_HUB_XV: 752 return "HUBCLIENT_XV"; 753 case NV_PFAULT_CLIENT_HUB_MMU_NB: 754 return "HUBCLIENT_MMU_NB"; 755 case NV_PFAULT_CLIENT_HUB_DFALCON: 756 return "HUBCLIENT_DFALCON"; 757 case NV_PFAULT_CLIENT_HUB_SKED: 758 return "HUBCLIENT_SKED"; 759 case NV_PFAULT_CLIENT_HUB_AFALCON: 760 return "HUBCLIENT_AFALCON"; 761 case NV_PFAULT_CLIENT_HUB_DONT_CARE: 762 return "HUBCLIENT_DONT_CARE"; 763 case 
NV_PFAULT_CLIENT_HUB_HSCE0: 764 return "HUBCLIENT_HSCE0"; 765 case NV_PFAULT_CLIENT_HUB_HSCE1: 766 return "HUBCLIENT_HSCE1"; 767 case NV_PFAULT_CLIENT_HUB_HSCE2: 768 return "HUBCLIENT_HSCE2"; 769 case NV_PFAULT_CLIENT_HUB_HSCE3: 770 return "HUBCLIENT_HSCE3"; 771 case NV_PFAULT_CLIENT_HUB_HSCE4: 772 return "HUBCLIENT_HSCE4"; 773 case NV_PFAULT_CLIENT_HUB_HSCE5: 774 return "HUBCLIENT_HSCE5"; 775 case NV_PFAULT_CLIENT_HUB_HSCE6: 776 return "HUBCLIENT_HSCE6"; 777 case NV_PFAULT_CLIENT_HUB_HSCE7: 778 return "HUBCLIENT_HSCE7"; 779 case NV_PFAULT_CLIENT_HUB_HSCE8: 780 return "HUBCLIENT_HSCE8"; 781 case NV_PFAULT_CLIENT_HUB_HSCE9: 782 return "HUBCLIENT_HSCE9"; 783 case NV_PFAULT_CLIENT_HUB_HSHUB: 784 return "HUBCLIENT_HSHUB"; 785 case NV_PFAULT_CLIENT_HUB_PTP_X0: 786 return "HUBCLIENT_PTP_X0"; 787 case NV_PFAULT_CLIENT_HUB_PTP_X1: 788 return "HUBCLIENT_PTP_X1"; 789 case NV_PFAULT_CLIENT_HUB_PTP_X2: 790 return "HUBCLIENT_PTP_X2"; 791 case NV_PFAULT_CLIENT_HUB_PTP_X3: 792 return "HUBCLIENT_PTP_X3"; 793 case NV_PFAULT_CLIENT_HUB_PTP_X4: 794 return "HUBCLIENT_PTP_X4"; 795 case NV_PFAULT_CLIENT_HUB_PTP_X5: 796 return "HUBCLIENT_PTP_X5"; 797 case NV_PFAULT_CLIENT_HUB_NVENC2: 798 return "HUBCLIENT_NVENC2"; 799 case NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER0: 800 return "HUBCLIENT_VPR_SCRUBBER0"; 801 case NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER1: 802 return "HUBCLIENT_VPR_SCRUBBER1"; 803 case NV_PFAULT_CLIENT_HUB_DWBIF: 804 return "HUBCLIENT_DWBIF"; 805 case NV_PFAULT_CLIENT_HUB_FBFALCON: 806 return "HUBCLIENT_FBFALCON"; 807 case NV_PFAULT_CLIENT_HUB_CE_SHIM: 808 return "HUBCLIENT_CE_SHIM"; 809 case NV_PFAULT_CLIENT_HUB_GSP: 810 return "HUBCLIENT_GSP"; 811 case NV_PFAULT_CLIENT_HUB_NVDEC1: 812 return "HUBCLIENT_NVDEC1"; 813 case NV_PFAULT_CLIENT_HUB_NVDEC2: 814 return "HUBCLIENT_NVDEC2"; 815 case NV_PFAULT_CLIENT_HUB_NVJPG0: 816 return "HUBCLIENT_NVJPG0"; 817 case NV_PFAULT_CLIENT_HUB_NVDEC3: 818 return "HUBCLIENT_NVDEC3"; 819 case NV_PFAULT_CLIENT_HUB_OFA0: 820 return "HUBCLIENT_OFA0"; 821 
case NV_PFAULT_CLIENT_HUB_HSCE10: 822 return "HUBCLIENT_HSCE10"; 823 case NV_PFAULT_CLIENT_HUB_HSCE11: 824 return "HUBCLIENT_HSCE11"; 825 case NV_PFAULT_CLIENT_HUB_HSCE12: 826 return "HUBCLIENT_HSCE12"; 827 case NV_PFAULT_CLIENT_HUB_HSCE13: 828 return "HUBCLIENT_HSCE13"; 829 case NV_PFAULT_CLIENT_HUB_HSCE14: 830 return "HUBCLIENT_HSCE14"; 831 case NV_PFAULT_CLIENT_HUB_HSCE15: 832 return "HUBCLIENT_HSCE15"; 833 case NV_PFAULT_CLIENT_HUB_FE1: 834 return "HUBCLIENT_FE1"; 835 case NV_PFAULT_CLIENT_HUB_FE2: 836 return "HUBCLIENT_FE2"; 837 case NV_PFAULT_CLIENT_HUB_FE3: 838 return "HUBCLIENT_FE3"; 839 case NV_PFAULT_CLIENT_HUB_FE4: 840 return "HUBCLIENT_FE4"; 841 case NV_PFAULT_CLIENT_HUB_FE5: 842 return "HUBCLIENT_FE5"; 843 case NV_PFAULT_CLIENT_HUB_FE6: 844 return "HUBCLIENT_FE6"; 845 case NV_PFAULT_CLIENT_HUB_FE7: 846 return "HUBCLIENT_FE7"; 847 case NV_PFAULT_CLIENT_HUB_FECS1: 848 return "HUBCLIENT_FECS1"; 849 case NV_PFAULT_CLIENT_HUB_FECS2: 850 return "HUBCLIENT_FECS2"; 851 case NV_PFAULT_CLIENT_HUB_FECS3: 852 return "HUBCLIENT_FECS3"; 853 case NV_PFAULT_CLIENT_HUB_FECS4: 854 return "HUBCLIENT_FECS4"; 855 case NV_PFAULT_CLIENT_HUB_FECS5: 856 return "HUBCLIENT_FECS5"; 857 case NV_PFAULT_CLIENT_HUB_FECS6: 858 return "HUBCLIENT_FECS6"; 859 case NV_PFAULT_CLIENT_HUB_FECS7: 860 return "HUBCLIENT_FECS7"; 861 case NV_PFAULT_CLIENT_HUB_SKED1: 862 return "HUBCLIENT_SKED1"; 863 case NV_PFAULT_CLIENT_HUB_SKED2: 864 return "HUBCLIENT_SKED2"; 865 case NV_PFAULT_CLIENT_HUB_SKED3: 866 return "HUBCLIENT_SKED3"; 867 case NV_PFAULT_CLIENT_HUB_SKED4: 868 return "HUBCLIENT_SKED4"; 869 case NV_PFAULT_CLIENT_HUB_SKED5: 870 return "HUBCLIENT_SKED5"; 871 case NV_PFAULT_CLIENT_HUB_SKED6: 872 return "HUBCLIENT_SKED6"; 873 case NV_PFAULT_CLIENT_HUB_SKED7: 874 return "HUBCLIENT_SKED7"; 875 case NV_PFAULT_CLIENT_HUB_ESC: 876 return "HUBCLIENT_ESC"; 877 default: 878 return kfifoGetClientIdStringCheck_HAL(pGpu, pKernelFifo, pMmuExceptInfo->clientId); 879 } 880 } 881 } 882 883 884 /** 885 * 
@brief Converts a subid/clientid into a client string
 *
 * @param[in] pGpu
 * @param[in] pKernelFifo
 * @param[in] clientId
 *
 * @returns a string (always non-null)
 */
const char*
kfifoGetClientIdStringCheck_GA100
(
    OBJGPU     *pGpu,
    KernelFifo *pKernelFifo,
    NvU32       clientId
)
{
    // GA100-specific HUB clients not present in the common table
    switch (clientId)
    {
        case NV_PFAULT_CLIENT_HUB_NVENC0:
            return "HUBCLIENT_NVENC0";
        case NV_PFAULT_CLIENT_HUB_NVDEC4:
            return "HUBCLIENT_NVDEC4";
        default:
            return "UNRECOGNIZED_CLIENT";
    }
}